var/home/core/zuul-output/logs/kubelet.log
Nov 28 13:18:27 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 28 13:18:27 crc restorecon[4743]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 13:18:27 crc 
restorecon[4743]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 13:18:27 crc 
restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:18:27 crc restorecon[4743]: 
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 
13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:18:27 crc restorecon[4743]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 13:18:27 crc restorecon[4743]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 13:18:27 crc restorecon[4743]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Nov 28 13:18:28 crc kubenswrapper[4857]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 28 13:18:28 crc kubenswrapper[4857]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Nov 28 13:18:28 crc kubenswrapper[4857]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 28 13:18:28 crc kubenswrapper[4857]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 28 13:18:28 crc kubenswrapper[4857]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Nov 28 13:18:28 crc kubenswrapper[4857]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.111304 4857 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115093 4857 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115116 4857 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115123 4857 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115130 4857 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115140 4857 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115148 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115155 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115161 4857 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115167 4857 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115172 4857 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115177 4857 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115182 4857 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115187 4857 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115192 4857 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115199 4857 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115205 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115211 4857 feature_gate.go:330] unrecognized feature gate: Example
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115217 4857 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115222 4857 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115228 4857 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115233 4857 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115237 4857 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115242 4857 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115248 4857 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115253 4857 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115259 4857 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115265 4857 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115271 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115286 4857 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115292 4857 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115298 4857 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115303 4857 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115308 4857 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115313 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115319 4857 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115324 4857 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115329 4857 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115334 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115341 4857 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115347 4857 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115352 4857 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115357 4857 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115362 4857 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115367 4857 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115372 4857 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115377 4857 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115415 4857 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115421 4857 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115427 4857 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115432 4857 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115438 4857 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115444 4857 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115449 4857 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115455 4857 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115460 4857 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115465 4857 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115470 4857 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115475 4857 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115480 4857 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115485 4857 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115490 4857 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115495 4857 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115503 4857 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115510 4857 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115516 4857 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115521 4857 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115526 4857 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115531 4857 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115536 4857 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115541 4857 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.115545 4857 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.115904 4857 flags.go:64] FLAG: --address="0.0.0.0"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.115920 4857 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.115931 4857 flags.go:64] FLAG: --anonymous-auth="true"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.115939 4857 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.115947 4857 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.115954 4857 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.115962 4857 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.115971 4857 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.115977 4857 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.115983 4857 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.115989 4857 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.115996 4857 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116001 4857 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116007 4857 flags.go:64] FLAG: --cgroup-root=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116013 4857 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116019 4857 flags.go:64] FLAG: --client-ca-file=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116024 4857 flags.go:64] FLAG: --cloud-config=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116030 4857 flags.go:64] FLAG: --cloud-provider=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116035 4857 flags.go:64] FLAG: --cluster-dns="[]"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116042 4857 flags.go:64] FLAG: --cluster-domain=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116048 4857 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116054 4857 flags.go:64] FLAG: --config-dir=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116059 4857 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116066 4857 flags.go:64] FLAG: --container-log-max-files="5"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116073 4857 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116079 4857 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116085 4857 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116091 4857 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116096 4857 flags.go:64] FLAG: --contention-profiling="false"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116102 4857 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116108 4857 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116114 4857 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116120 4857 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116127 4857 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116133 4857 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116139 4857 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116144 4857 flags.go:64] FLAG: --enable-load-reader="false"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116150 4857 flags.go:64] FLAG: --enable-server="true"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116155 4857 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116164 4857 flags.go:64] FLAG: --event-burst="100"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116170 4857 flags.go:64] FLAG: --event-qps="50"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116176 4857 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116181 4857 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116188 4857 flags.go:64] FLAG: --eviction-hard=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116195 4857 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116201 4857 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116207 4857 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116214 4857 flags.go:64] FLAG: --eviction-soft=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116221 4857 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116227 4857 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116233 4857 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116240 4857 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116246 4857 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116252 4857 flags.go:64] FLAG: --fail-swap-on="true"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116258 4857 flags.go:64] FLAG: --feature-gates=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116265 4857 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116271 4857 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116277 4857 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116283 4857 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116289 4857 flags.go:64] FLAG: --healthz-port="10248"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116294 4857 flags.go:64] FLAG: --help="false"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116300 4857 flags.go:64] FLAG: --hostname-override=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116306 4857 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116312 4857 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116319 4857 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116324 4857 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116330 4857 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116336 4857 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116341 4857 flags.go:64] FLAG: --image-service-endpoint=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116347 4857 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116352 4857 flags.go:64] FLAG: --kube-api-burst="100"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116358 4857 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116365 4857 flags.go:64] FLAG: --kube-api-qps="50"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116370 4857 flags.go:64] FLAG: --kube-reserved=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116375 4857 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116381 4857 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116387 4857 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116392 4857 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116398 4857 flags.go:64] FLAG: --lock-file=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116406 4857 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116413 4857 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116418 4857 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116427 4857 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116433 4857 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116439 4857 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116444 4857 flags.go:64] FLAG: --logging-format="text"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116449 4857 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116456 4857 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116463 4857 flags.go:64] FLAG: --manifest-url=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116469 4857 flags.go:64] FLAG: --manifest-url-header=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116477 4857 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116483 4857 flags.go:64] FLAG: --max-open-files="1000000"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116490 4857 flags.go:64] FLAG: --max-pods="110"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116496 4857 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116502 4857 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116509 4857 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116515 4857 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116522 4857 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116528 4857 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116534 4857 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116548 4857 flags.go:64] FLAG: --node-status-max-images="50"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116554 4857 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116560 4857 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116566 4857 flags.go:64] FLAG: --pod-cidr=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116571 4857 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116580 4857 flags.go:64] FLAG: --pod-manifest-path=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116585 4857 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116591 4857 flags.go:64] FLAG: --pods-per-core="0"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116597 4857 flags.go:64] FLAG: --port="10250"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116602 4857 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116608 4857 flags.go:64] FLAG: --provider-id=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116613 4857 flags.go:64] FLAG: --qos-reserved=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116619 4857 flags.go:64] FLAG: --read-only-port="10255"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116625 4857 flags.go:64] FLAG: --register-node="true"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116631 4857 flags.go:64] FLAG: --register-schedulable="true"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116638 4857 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116648 4857 flags.go:64] FLAG: --registry-burst="10"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116653 4857 flags.go:64] FLAG: --registry-qps="5"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116659 4857 flags.go:64] FLAG: --reserved-cpus=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116665 4857 flags.go:64] FLAG: --reserved-memory=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116672 4857 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116678 4857 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116683 4857 flags.go:64] FLAG: --rotate-certificates="false"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116689 4857 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116695 4857 flags.go:64] FLAG: --runonce="false"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116701 4857 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116706 4857 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116712 4857 flags.go:64] FLAG: --seccomp-default="false"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116718 4857 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116724 4857 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116729 4857 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116735 4857 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116742 4857 flags.go:64] FLAG: --storage-driver-password="root"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116767 4857 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116774 4857 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116780 4857 flags.go:64] FLAG: --storage-driver-user="root"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116787 4857 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116793 4857 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116800 4857 flags.go:64] FLAG: --system-cgroups=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116806 4857 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116815 4857 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116821 4857 flags.go:64] FLAG: --tls-cert-file=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116826 4857 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116833 4857 flags.go:64] FLAG: --tls-min-version=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116840 4857 flags.go:64] FLAG: --tls-private-key-file=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116845 4857 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116851 4857 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116858 4857 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116864 4857 flags.go:64] FLAG: --v="2"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116871 4857 flags.go:64] FLAG: --version="false"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116879 4857 flags.go:64] FLAG: --vmodule=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116887 4857 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.116893 4857 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117035 4857 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117042 4857 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117048 4857 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117053 4857 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117060 4857 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117067 4857 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117073 4857 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117079 4857 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117084 4857 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117089 4857 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117094 4857 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117100 4857 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117105 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117110 4857 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117115 4857 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117122 4857 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117128 4857 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117133 4857 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117138 4857 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117143 4857 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117148 4857 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117153 4857 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117158 4857 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117163 4857 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117168 4857 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117172 4857 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117177 4857 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117182 4857 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117188 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117192 4857 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117197 4857 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117202 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117207 4857 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117212 4857 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117218 4857 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117223 4857 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117229 4857 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117234 4857 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117239 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117244 4857 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117249 4857 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117254 4857 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117259 4857 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117264 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117269 4857 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117274 4857 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117279 4857 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117284 4857 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117288 4857 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117293 4857 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117298 4857 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117303 4857 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117308 4857 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117313 4857 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117320 4857 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117327 4857 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117333 4857 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117339 4857 feature_gate.go:330] unrecognized feature gate: Example
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117344 4857 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117349 4857 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117356 4857 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117362 4857 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117368 4857 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117373 4857 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117378 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117384 4857 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117391 4857 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117396 4857 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117402 4857 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117407 4857 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.117414 4857 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.117432 4857 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.126256 4857 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.126293 4857 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126417 4857 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126439 4857 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126446 4857 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126453 4857 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126461 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126469 4857 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126476 4857 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126483 4857 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126489 4857 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126496 4857 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126503 4857 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126510 4857 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126516 4857 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126522 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126529 4857 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126535 4857 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126541 4857 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126548 4857 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126555 4857 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126561 4857 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126567 4857 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126573 4857 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126582 4857 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126590 4857 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126598 4857 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126607 4857 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126615 4857 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126623 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126629 4857 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126636 4857 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126643 4857 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126649 4857 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126656 4857 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126663 4857 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126671 4857 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126678 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126685 4857 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126692 4857 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126699 4857 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126706 4857 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126712 4857 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126718 4857 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126725 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126732 4857 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126738 4857 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126745 4857 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126770 4857 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126780 4857 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126791 4857 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126799 4857 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126807 4857 feature_gate.go:330] unrecognized feature gate: Example
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126814 4857 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126821 4857 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126830 4857 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126839 4857 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126847 4857 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126855 4857 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126864 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126871 4857 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126877 4857 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126883 4857 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126890 4857 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126898 4857 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126905 4857 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126912 4857 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126918 4857 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126924 4857 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126931 4857 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126939 4857 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126948 4857 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.126957 4857 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.126968 4857 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127163 4857 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127176 4857 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127184 4857 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127191 4857 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127198 4857 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127207 4857 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127218 4857 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127226 4857 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127234 4857 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127243 4857 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127250 4857 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127258 4857 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127266 4857 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127273 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127281 4857 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127288 4857 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127296 4857 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127306 4857 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127313 4857 feature_gate.go:330] unrecognized feature gate: Example
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127320 4857 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127327 4857 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127334 4857 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127341 4857 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127348 4857 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127354 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127361 4857 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127368 4857 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127374 4857 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127381 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127387 4857 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127393 4857 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127400 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127437 4857 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127444 4857 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127451 4857 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127459 4857 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127465 4857 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127471 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127478 4857 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127485 4857 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127491 4857 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127497 4857 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127504 4857 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127510 4857 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127517 4857 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127523 4857 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127531 4857 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127541 4857 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127549 4857 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127557 4857 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127566 4857 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127572 4857 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127579 4857 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127586 4857 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127593 4857 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127599 4857 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127606 4857 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127612 4857 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127619 4857 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127625 4857 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127634 4857 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127641 4857 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127648 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127654 4857 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127661 4857 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127667 4857 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127674 4857 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127680 4857 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127687 4857 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127694 4857 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.127701 4857 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.127711 4857 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.128195 4857 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.131517 4857 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.131626 4857 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.132363 4857 server.go:997] "Starting client certificate rotation"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.132399 4857 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.132838 4857 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-11-09 09:54:18.607273746 +0000 UTC
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.132940 4857 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.142383 4857 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 28 13:18:28 crc kubenswrapper[4857]: E1128 13:18:28.144352 4857 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.227:6443: connect: connection refused" logger="UnhandledError"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.145189 4857 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.158323 4857 log.go:25] "Validated CRI v1 runtime API"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.181563 4857 log.go:25] "Validated CRI v1 image API"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.183922 4857 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.186318 4857 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-28-13-13-55-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.186353 4857 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.203517 4857 manager.go:217] Machine: {Timestamp:2025-11-28 13:18:28.202182195 +0000 UTC m=+0.229557392 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654132736 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:7380db04-0488-4227-9557-a0513fb82c9e BootID:8c4e6844-6425-4e0e-8b58-4bf189cd3967 Filesystems:[{Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730829824 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827068416 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:f5:90:d1 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:f5:90:d1 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:01:4e:95 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:13:c0:65 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:cc:a9:0a Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:c5:42:a0 Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:25:72:ea Speed:-1 Mtu:1496} {Name:eth10 MacAddress:6a:0c:01:eb:4f:7a Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:66:04:a6:35:9a:ae Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654132736 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.203880 4857 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.204004 4857 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.204776 4857 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.204976 4857 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.205015 4857 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.205242 4857 topology_manager.go:138] "Creating topology manager with none policy"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.205255 4857 container_manager_linux.go:303] "Creating device plugin manager"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.205511 4857 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.205548 4857 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.205727 4857 state_mem.go:36] "Initialized new in-memory state store"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.205845 4857 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.206789 4857 kubelet.go:418] "Attempting to sync node with API server"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.206816 4857 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.206845 4857 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.206862 4857 kubelet.go:324] "Adding apiserver pod source"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.206878 4857 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.208946 4857 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.209151 4857 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.227:6443: connect: connection refused
Nov 28 13:18:28 crc kubenswrapper[4857]: E1128 13:18:28.209261 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.227:6443: connect: connection refused" logger="UnhandledError"
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.209600 4857 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.227:6443: connect: connection refused
Nov 28 13:18:28 crc kubenswrapper[4857]: E1128 13:18:28.209679 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.227:6443: connect: connection refused" logger="UnhandledError"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.209684 4857 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.210915 4857 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.211874 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.211953 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.211971 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.211985 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.212008 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.212024 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.212039 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.212061 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.212077 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.212093 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.212134 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.212150 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.212804 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.214003 4857 server.go:1280] "Started kubelet"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.214443 4857 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.215348 4857 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.227:6443: connect: connection refused
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.215303 4857 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 28 13:18:28 crc systemd[1]: Started Kubernetes Kubelet.
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.216573 4857 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 28 13:18:28 crc kubenswrapper[4857]: E1128 13:18:28.218632 4857 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.227:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187c2e292187f7ad default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 13:18:28.213487533 +0000 UTC m=+0.240862740,LastTimestamp:2025-11-28 13:18:28.213487533 +0000 UTC m=+0.240862740,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.224252 4857 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.224319 4857 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.224620 4857 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.224703 4857 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.224867 4857 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 28 13:18:28 crc kubenswrapper[4857]: E1128 13:18:28.224904 4857 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.225283 4857 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.227:6443: connect: connection refused
Nov 28 13:18:28 crc kubenswrapper[4857]: E1128 13:18:28.225353 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.227:6443: connect: connection refused" logger="UnhandledError"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.225700 4857 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.225721 4857 factory.go:55] Registering systemd factory
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.225733 4857 factory.go:221] Registration of the systemd container factory successfully
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.224544 4857 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 15:07:16.658106183 +0000 UTC
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.225879 4857 server.go:460] "Adding debug handlers to kubelet server"
Nov 28 13:18:28 crc kubenswrapper[4857]: E1128 13:18:28.226113 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.227:6443: connect: connection refused" interval="200ms"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.227399 4857 factory.go:153] Registering CRI-O factory
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.227444 4857 factory.go:221] Registration of the crio container factory successfully
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.227485 4857 factory.go:103] Registering Raw factory
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.227512 4857 manager.go:1196] Started watching for new ooms in manager
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.231503 4857 manager.go:319] Starting recovery of all containers
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.236813 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.237019 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.237108 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.237189 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.237302 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.237388 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.237501 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.237586 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.237764 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.237854 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.237933 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.238033 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.238115 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.238238 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.238334 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.238418 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.238518 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.246188 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.249425 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.249458 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.249472 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.249485 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.249503 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.249517 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.254138 4857 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount"
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.254966 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.255200 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.255255 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.255277 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.255289 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.255301 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.255330 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.255344 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.255405 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.255419 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.255431 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.255446 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.255516 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.255641 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.255669 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.255695 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.255710 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.255727 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.255743 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.255814 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.255896 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.255963 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.255977 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256010 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256124 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256153 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256174 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256229 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256424 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256514 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256572 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256593 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256612 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256626 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256660 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256675 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256690 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256708 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256744 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256784 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256801 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256815 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256868 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256884 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256900 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256927 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256943 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256964 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.256978 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257008 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257033 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257048 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257082 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257096 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257112 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257135 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257195 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257212 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257243 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257262 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257277 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257294 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257328 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257342 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257359 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257372 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257400 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257419 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257437 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257471 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257488 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257501 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257533 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257563 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257580 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257594 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257608 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257644 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257658 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257675 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257734 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257793 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257823 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257843 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257896 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257954 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.257982 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258009 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258012 4857 manager.go:324] Recovery completed
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258051 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258071 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258099 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258117 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258140 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258157 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258179 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258202 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258234 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258259 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258279 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258298 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258319 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258336 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258364 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258384 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258410 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258432 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258456 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258474 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258499 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258517 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258539 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258559 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258575 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258597 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.258616 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783"
volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259145 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259173 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259186 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259214 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259228 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259241 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259253 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259281 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259294 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259306 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259319 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259331 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259356 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259369 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259381 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259395 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259410 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259442 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259456 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259471 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259491 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259522 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" 
volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259537 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259551 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259566 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259601 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259640 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259653 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259679 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259693 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259706 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259718 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259731 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259775 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259788 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259800 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259811 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259821 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259848 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259859 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259870 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259881 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259893 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259904 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" 
volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259931 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259944 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259956 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259969 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.259982 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.260009 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.260020 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.260032 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.260043 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.260055 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.260085 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" 
volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.260126 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.260138 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.260170 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.260182 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.260194 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.260205 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.260218 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.260246 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.260258 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.260270 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.260282 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" 
volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.260293 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.260321 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.260332 4857 reconstruct.go:97] "Volume reconstruction finished" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.260340 4857 reconciler.go:26] "Reconciler: start to sync state" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.268281 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.270110 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.270168 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.270183 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.270904 4857 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.270926 4857 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.270946 4857 state_mem.go:36] "Initialized new in-memory state store" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.297324 4857 policy_none.go:49] "None policy: Start" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.298569 4857 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.298618 4857 state_mem.go:35] "Initializing new in-memory state store" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.306002 4857 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.308174 4857 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.308227 4857 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.308274 4857 kubelet.go:2335] "Starting kubelet main sync loop" Nov 28 13:18:28 crc kubenswrapper[4857]: E1128 13:18:28.308514 4857 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.309380 4857 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.227:6443: connect: connection refused Nov 28 13:18:28 crc kubenswrapper[4857]: E1128 13:18:28.309495 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.227:6443: connect: connection refused" logger="UnhandledError" Nov 28 13:18:28 crc kubenswrapper[4857]: E1128 13:18:28.325740 4857 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.370279 4857 manager.go:334] "Starting Device Plugin manager" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.370515 4857 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.370532 4857 server.go:79] "Starting device plugin registration server" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.371112 4857 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.371174 4857 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.371536 4857 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.371667 4857 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.371675 4857 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 28 13:18:28 crc kubenswrapper[4857]: E1128 13:18:28.383336 4857 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.409604 4857 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.409726 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.411156 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.411240 4857 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.411258 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.411515 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.411717 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.411775 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.412993 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.413045 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.413059 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.413236 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.413272 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.413286 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.413447 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.413575 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.413605 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.414126 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.414153 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.414164 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.414340 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.414349 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.414386 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.414459 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.414604 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.414623 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.415285 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.415313 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.415326 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.415659 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.415682 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.415663 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.415694 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.415768 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.415801 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.416306 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.416329 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.416345 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.416520 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.416556 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.416898 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.416925 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.416939 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.417240 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.417264 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.417276 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:28 crc kubenswrapper[4857]: E1128 13:18:28.426965 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.227:6443: connect: connection refused" interval="400ms" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.462240 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.462313 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.462355 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.462377 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.462401 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 13:18:28 
crc kubenswrapper[4857]: I1128 13:18:28.462421 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.462444 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.462561 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.462659 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.462725 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.462795 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.462855 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.462889 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.462920 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.462956 4857 reconciler_common.go:245] 
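[annotation] The reconciler_common.go entries in this stretch walk each host-path volume of the static pods through two phases: VerifyControllerAttachedVolume first, followed below by MountVolume entries, with operation_generator.go then reporting MountVolume.SetUp succeeded. A minimal sketch of that desired-state versus actual-state progression, with simplified types in place of the kubelet's operation executor:

package main

import "fmt"

type phase int

const (
	unattached phase = iota
	attached
	mounted
)

type volume struct {
	name  string
	pod   string
	state phase
}

// reconcile advances every desired volume one phase per pass, the way the
// log shows a run of verification entries followed by a run of mount entries.
func reconcile(desired []*volume) {
	for _, v := range desired {
		switch v.state {
		case unattached:
			fmt.Printf("VerifyControllerAttachedVolume started for volume %q pod=%q\n", v.name, v.pod)
			v.state = attached
		case attached:
			fmt.Printf("MountVolume started for volume %q pod=%q\n", v.name, v.pod)
			v.state = mounted
			fmt.Printf("MountVolume.SetUp succeeded for volume %q pod=%q\n", v.name, v.pod)
		}
	}
}

func main() {
	vols := []*volume{
		{name: "cert-dir", pod: "openshift-etcd/etcd-crc"},
		{name: "resource-dir", pod: "openshift-kube-apiserver/kube-apiserver-crc"},
	}
	reconcile(vols) // verify phase
	reconcile(vols) // mount phase
}

Host-path volumes need no real attach step, which is presumably why verification and setup succeed immediately here even though the API server is still refusing connections.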
"operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.472082 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.473601 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.473643 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.473654 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.473685 4857 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 13:18:28 crc kubenswrapper[4857]: E1128 13:18:28.474256 4857 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.227:6443: connect: connection refused" node="crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.564664 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.564740 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.564785 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.564810 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.564848 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.564876 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: 
\"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.564901 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.564928 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.564949 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.564969 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.564990 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.565009 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.565029 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.565081 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.565098 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.565133 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.565148 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.565245 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.565251 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.565314 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.565310 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.565302 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.565872 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.565898 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.565954 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.565991 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" 
(UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.565277 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.566020 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.565292 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.566110 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: E1128 13:18:28.576908 4857 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.227:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187c2e292187f7ad default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 13:18:28.213487533 +0000 UTC m=+0.240862740,LastTimestamp:2025-11-28 13:18:28.213487533 +0000 UTC m=+0.240862740,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.674589 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.676354 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.676429 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.676452 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.676499 4857 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 13:18:28 crc kubenswrapper[4857]: E1128 13:18:28.676964 4857 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 
38.102.83.227:6443: connect: connection refused" node="crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.749961 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.765729 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.780435 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.781945 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-f2da00d8b0614f95493299b1f93378a1caf22617ded508a8025409b92bc3a67f WatchSource:0}: Error finding container f2da00d8b0614f95493299b1f93378a1caf22617ded508a8025409b92bc3a67f: Status 404 returned error can't find the container with id f2da00d8b0614f95493299b1f93378a1caf22617ded508a8025409b92bc3a67f Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.801904 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: I1128 13:18:28.807912 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 13:18:28 crc kubenswrapper[4857]: E1128 13:18:28.827729 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.227:6443: connect: connection refused" interval="800ms" Nov 28 13:18:28 crc kubenswrapper[4857]: W1128 13:18:28.835094 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-d7d46be10263185c9febed0b53d2f3c7392f7e0cdf55b9d7a04afc960421b1a6 WatchSource:0}: Error finding container d7d46be10263185c9febed0b53d2f3c7392f7e0cdf55b9d7a04afc960421b1a6: Status 404 returned error can't find the container with id d7d46be10263185c9febed0b53d2f3c7392f7e0cdf55b9d7a04afc960421b1a6 Nov 28 13:18:29 crc kubenswrapper[4857]: I1128 13:18:29.077506 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:29 crc kubenswrapper[4857]: I1128 13:18:29.079119 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:29 crc kubenswrapper[4857]: I1128 13:18:29.079190 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:29 crc kubenswrapper[4857]: I1128 13:18:29.079208 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:29 crc kubenswrapper[4857]: I1128 13:18:29.079245 4857 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 13:18:29 crc kubenswrapper[4857]: E1128 13:18:29.079839 4857 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.227:6443: connect: connection refused" node="crc" Nov 28 13:18:29 crc kubenswrapper[4857]: 
I1128 13:18:29.216347 4857 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.227:6443: connect: connection refused Nov 28 13:18:29 crc kubenswrapper[4857]: I1128 13:18:29.226596 4857 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 10:13:00.058018997 +0000 UTC Nov 28 13:18:29 crc kubenswrapper[4857]: I1128 13:18:29.226663 4857 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 524h54m30.831358145s for next certificate rotation Nov 28 13:18:29 crc kubenswrapper[4857]: I1128 13:18:29.316125 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"d7d46be10263185c9febed0b53d2f3c7392f7e0cdf55b9d7a04afc960421b1a6"} Nov 28 13:18:29 crc kubenswrapper[4857]: I1128 13:18:29.317362 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"d547dfadbfce3c29381afff8444d8053136f665b290ebb758d7d6f6c6e848042"} Nov 28 13:18:29 crc kubenswrapper[4857]: I1128 13:18:29.318249 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"158bee9ca75f053b1af190831d7dc5f4298f62ce07fbb81f2ac85c8fcad50c78"} Nov 28 13:18:29 crc kubenswrapper[4857]: I1128 13:18:29.319276 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f96be2dc9d6fab9c2de4a7706cc1e943ec32c82eb982095bd19f55c34486211c"} Nov 28 13:18:29 crc kubenswrapper[4857]: I1128 13:18:29.320188 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"f2da00d8b0614f95493299b1f93378a1caf22617ded508a8025409b92bc3a67f"} Nov 28 13:18:29 crc kubenswrapper[4857]: W1128 13:18:29.358827 4857 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.227:6443: connect: connection refused Nov 28 13:18:29 crc kubenswrapper[4857]: E1128 13:18:29.358959 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.227:6443: connect: connection refused" logger="UnhandledError" Nov 28 13:18:29 crc kubenswrapper[4857]: W1128 13:18:29.533238 4857 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.227:6443: connect: connection refused Nov 28 13:18:29 crc kubenswrapper[4857]: E1128 13:18:29.533376 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to 
list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.227:6443: connect: connection refused" logger="UnhandledError" Nov 28 13:18:29 crc kubenswrapper[4857]: W1128 13:18:29.584729 4857 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.227:6443: connect: connection refused Nov 28 13:18:29 crc kubenswrapper[4857]: E1128 13:18:29.584869 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.227:6443: connect: connection refused" logger="UnhandledError" Nov 28 13:18:29 crc kubenswrapper[4857]: E1128 13:18:29.629015 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.227:6443: connect: connection refused" interval="1.6s" Nov 28 13:18:29 crc kubenswrapper[4857]: W1128 13:18:29.653259 4857 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.227:6443: connect: connection refused Nov 28 13:18:29 crc kubenswrapper[4857]: E1128 13:18:29.653374 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.227:6443: connect: connection refused" logger="UnhandledError" Nov 28 13:18:29 crc kubenswrapper[4857]: I1128 13:18:29.880183 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:29 crc kubenswrapper[4857]: I1128 13:18:29.881627 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:29 crc kubenswrapper[4857]: I1128 13:18:29.881678 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:29 crc kubenswrapper[4857]: I1128 13:18:29.881692 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:29 crc kubenswrapper[4857]: I1128 13:18:29.881721 4857 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 13:18:29 crc kubenswrapper[4857]: E1128 13:18:29.882292 4857 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.227:6443: connect: connection refused" node="crc" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.216144 4857 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.227:6443: connect: connection refused Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.299811 4857 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Nov 
28 13:18:30 crc kubenswrapper[4857]: E1128 13:18:30.301179 4857 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.227:6443: connect: connection refused" logger="UnhandledError" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.326120 4857 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d" exitCode=0 Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.326194 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d"} Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.326400 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.327818 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.327861 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.327874 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.329545 4857 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="5335e99c6c1f70658778ab57280d7b8cb2ab151b9f523bd1cc42354ef53f76ac" exitCode=0 Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.329585 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.329672 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"5335e99c6c1f70658778ab57280d7b8cb2ab151b9f523bd1cc42354ef53f76ac"} Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.330739 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.330810 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.330830 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.333076 4857 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="65aca5158444f4f1cfa23a526f33afe796d7c390a9d4007ec93a861a8399cb23" exitCode=0 Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.333169 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"65aca5158444f4f1cfa23a526f33afe796d7c390a9d4007ec93a861a8399cb23"} Nov 28 13:18:30 crc kubenswrapper[4857]: 
I1128 13:18:30.333297 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.334494 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.334531 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.334629 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.337493 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502"} Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.337550 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.337557 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6"} Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.337674 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf"} Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.337707 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3"} Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.338634 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.338698 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.338714 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.339716 4857 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f" exitCode=0 Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.339765 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f"} Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.339853 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.340809 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.340834 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.340844 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.342977 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.343842 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.343877 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:30 crc kubenswrapper[4857]: I1128 13:18:30.343890 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.345222 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"953e18c7e26d1dbbd6f09ba86ce60483d35bd6bb271a76998acbc9e2d333a034"} Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.345304 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"2d3e7e3b66ec5d45bfcbe5f4de7e21b540ba5bcc9859f3753465db8f992b731d"} Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.345334 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"cc6c67a288ed242cc97c05f9c8a01591c4ea3c3b8bb11e4e76d38bba7dd17f15"} Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.345303 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.346585 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.346623 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.346634 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.347363 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4"} Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.347403 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7"} Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.347414 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee"} Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.349393 4857 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c" exitCode=0 Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.349431 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c"} Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.349508 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.350417 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.350470 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.350484 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.351475 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"512c4d00871836ce981f36aec4bd31095bd0d35afbac52016837aa0aad7d337b"} Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.351526 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.351619 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.352534 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.352563 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.352578 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.352971 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.353002 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.353014 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.483388 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.485152 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.485287 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.485415 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.485458 4857 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 13:18:31 crc kubenswrapper[4857]: I1128 13:18:31.728535 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 13:18:32 crc kubenswrapper[4857]: I1128 13:18:32.358150 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c"} Nov 28 13:18:32 crc kubenswrapper[4857]: I1128 13:18:32.358235 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49"} Nov 28 13:18:32 crc kubenswrapper[4857]: I1128 13:18:32.358416 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:32 crc kubenswrapper[4857]: I1128 13:18:32.359709 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:32 crc kubenswrapper[4857]: I1128 13:18:32.359783 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:32 crc kubenswrapper[4857]: I1128 13:18:32.359803 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:32 crc kubenswrapper[4857]: I1128 13:18:32.362690 4857 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4" exitCode=0 Nov 28 13:18:32 crc kubenswrapper[4857]: I1128 13:18:32.362777 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:32 crc kubenswrapper[4857]: I1128 13:18:32.363164 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:32 crc kubenswrapper[4857]: I1128 13:18:32.363435 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4"} Nov 28 13:18:32 crc kubenswrapper[4857]: I1128 13:18:32.363514 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:32 crc kubenswrapper[4857]: I1128 13:18:32.364196 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:32 crc kubenswrapper[4857]: I1128 13:18:32.364217 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:32 crc kubenswrapper[4857]: I1128 13:18:32.364226 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:32 crc kubenswrapper[4857]: I1128 13:18:32.364709 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 13:18:32 crc kubenswrapper[4857]: I1128 13:18:32.364730 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:32 crc kubenswrapper[4857]: I1128 13:18:32.364739 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:32 crc kubenswrapper[4857]: I1128 13:18:32.365368 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:32 crc kubenswrapper[4857]: I1128 13:18:32.365389 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:32 crc kubenswrapper[4857]: I1128 13:18:32.365397 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:33 crc kubenswrapper[4857]: I1128 13:18:33.370275 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc"} Nov 28 13:18:33 crc kubenswrapper[4857]: I1128 13:18:33.370340 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a"} Nov 28 13:18:33 crc kubenswrapper[4857]: I1128 13:18:33.370356 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96"} Nov 28 13:18:33 crc kubenswrapper[4857]: I1128 13:18:33.370384 4857 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 13:18:33 crc kubenswrapper[4857]: I1128 13:18:33.370455 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:33 crc kubenswrapper[4857]: I1128 13:18:33.370574 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:33 crc kubenswrapper[4857]: I1128 13:18:33.371937 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:33 crc kubenswrapper[4857]: I1128 13:18:33.372007 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:33 crc kubenswrapper[4857]: I1128 13:18:33.372029 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:33 crc kubenswrapper[4857]: I1128 13:18:33.372695 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:33 crc kubenswrapper[4857]: I1128 13:18:33.372783 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:33 crc kubenswrapper[4857]: I1128 13:18:33.372804 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:33 crc kubenswrapper[4857]: I1128 13:18:33.799871 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:18:33 crc kubenswrapper[4857]: I1128 13:18:33.800163 
4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:33 crc kubenswrapper[4857]: I1128 13:18:33.802552 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:33 crc kubenswrapper[4857]: I1128 13:18:33.802613 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:33 crc kubenswrapper[4857]: I1128 13:18:33.802642 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:33 crc kubenswrapper[4857]: I1128 13:18:33.808930 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:18:34 crc kubenswrapper[4857]: I1128 13:18:34.380735 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c85d63d0f03cc2fd7c20c810bdc50ff6af06b839750af8e8af78fdcab32bfd21"} Nov 28 13:18:34 crc kubenswrapper[4857]: I1128 13:18:34.380838 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00"} Nov 28 13:18:34 crc kubenswrapper[4857]: I1128 13:18:34.380793 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:34 crc kubenswrapper[4857]: I1128 13:18:34.380869 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:34 crc kubenswrapper[4857]: I1128 13:18:34.382479 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:34 crc kubenswrapper[4857]: I1128 13:18:34.382545 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:34 crc kubenswrapper[4857]: I1128 13:18:34.382565 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:34 crc kubenswrapper[4857]: I1128 13:18:34.382837 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:34 crc kubenswrapper[4857]: I1128 13:18:34.382880 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:34 crc kubenswrapper[4857]: I1128 13:18:34.382898 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:34 crc kubenswrapper[4857]: I1128 13:18:34.594994 4857 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Nov 28 13:18:34 crc kubenswrapper[4857]: I1128 13:18:34.810293 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:18:34 crc kubenswrapper[4857]: I1128 13:18:34.810486 4857 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 13:18:34 crc kubenswrapper[4857]: I1128 13:18:34.810539 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:34 crc kubenswrapper[4857]: I1128 13:18:34.812211 4857 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:34 crc kubenswrapper[4857]: I1128 13:18:34.812247 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:34 crc kubenswrapper[4857]: I1128 13:18:34.812256 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:34 crc kubenswrapper[4857]: I1128 13:18:34.951838 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:18:35 crc kubenswrapper[4857]: I1128 13:18:35.384144 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:35 crc kubenswrapper[4857]: I1128 13:18:35.384149 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:35 crc kubenswrapper[4857]: I1128 13:18:35.385691 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:35 crc kubenswrapper[4857]: I1128 13:18:35.385736 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:35 crc kubenswrapper[4857]: I1128 13:18:35.385773 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:35 crc kubenswrapper[4857]: I1128 13:18:35.385953 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:35 crc kubenswrapper[4857]: I1128 13:18:35.386023 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:35 crc kubenswrapper[4857]: I1128 13:18:35.386037 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:35 crc kubenswrapper[4857]: I1128 13:18:35.424800 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:18:35 crc kubenswrapper[4857]: I1128 13:18:35.425086 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:35 crc kubenswrapper[4857]: I1128 13:18:35.426723 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:35 crc kubenswrapper[4857]: I1128 13:18:35.426816 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:35 crc kubenswrapper[4857]: I1128 13:18:35.426837 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:35 crc kubenswrapper[4857]: I1128 13:18:35.446792 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:18:35 crc kubenswrapper[4857]: I1128 13:18:35.708793 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:18:36 crc kubenswrapper[4857]: I1128 13:18:36.387361 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:36 crc kubenswrapper[4857]: I1128 13:18:36.388241 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 
13:18:36 crc kubenswrapper[4857]: I1128 13:18:36.389327 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:36 crc kubenswrapper[4857]: I1128 13:18:36.389377 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:36 crc kubenswrapper[4857]: I1128 13:18:36.389396 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:36 crc kubenswrapper[4857]: I1128 13:18:36.390691 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:36 crc kubenswrapper[4857]: I1128 13:18:36.390729 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:36 crc kubenswrapper[4857]: I1128 13:18:36.390747 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:36 crc kubenswrapper[4857]: I1128 13:18:36.635406 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 28 13:18:36 crc kubenswrapper[4857]: I1128 13:18:36.635674 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:36 crc kubenswrapper[4857]: I1128 13:18:36.637505 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:36 crc kubenswrapper[4857]: I1128 13:18:36.637566 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:36 crc kubenswrapper[4857]: I1128 13:18:36.637584 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:37 crc kubenswrapper[4857]: I1128 13:18:37.212221 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 28 13:18:37 crc kubenswrapper[4857]: I1128 13:18:37.390697 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:37 crc kubenswrapper[4857]: I1128 13:18:37.392523 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:37 crc kubenswrapper[4857]: I1128 13:18:37.392604 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:37 crc kubenswrapper[4857]: I1128 13:18:37.392630 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:38 crc kubenswrapper[4857]: E1128 13:18:38.384302 4857 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 28 13:18:38 crc kubenswrapper[4857]: I1128 13:18:38.447174 4857 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 13:18:38 crc kubenswrapper[4857]: I1128 13:18:38.447300 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 13:18:39 crc kubenswrapper[4857]: I1128 13:18:39.369897 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:18:39 crc kubenswrapper[4857]: I1128 13:18:39.370564 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:39 crc kubenswrapper[4857]: I1128 13:18:39.372863 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:39 crc kubenswrapper[4857]: I1128 13:18:39.372933 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:39 crc kubenswrapper[4857]: I1128 13:18:39.372954 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:39 crc kubenswrapper[4857]: I1128 13:18:39.377610 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:18:39 crc kubenswrapper[4857]: I1128 13:18:39.399554 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:39 crc kubenswrapper[4857]: I1128 13:18:39.401045 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:39 crc kubenswrapper[4857]: I1128 13:18:39.401102 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:39 crc kubenswrapper[4857]: I1128 13:18:39.401122 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:41 crc kubenswrapper[4857]: I1128 13:18:41.217558 4857 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Nov 28 13:18:41 crc kubenswrapper[4857]: E1128 13:18:41.230263 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" interval="3.2s" Nov 28 13:18:41 crc kubenswrapper[4857]: E1128 13:18:41.487547 4857 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": net/http: TLS handshake timeout" node="crc" Nov 28 13:18:41 crc kubenswrapper[4857]: W1128 13:18:41.854797 4857 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout Nov 28 13:18:41 crc kubenswrapper[4857]: I1128 13:18:41.855112 4857 trace.go:236] Trace[1995477746]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 13:18:31.853) (total time: 10001ms): Nov 28 13:18:41 crc kubenswrapper[4857]: Trace[1995477746]: ---"Objects listed" error:Get 
"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (13:18:41.854) Nov 28 13:18:41 crc kubenswrapper[4857]: Trace[1995477746]: [10.001978407s] [10.001978407s] END Nov 28 13:18:41 crc kubenswrapper[4857]: E1128 13:18:41.855245 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Nov 28 13:18:41 crc kubenswrapper[4857]: W1128 13:18:41.881273 4857 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout Nov 28 13:18:41 crc kubenswrapper[4857]: I1128 13:18:41.881391 4857 trace.go:236] Trace[904633674]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 13:18:31.879) (total time: 10001ms): Nov 28 13:18:41 crc kubenswrapper[4857]: Trace[904633674]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (13:18:41.881) Nov 28 13:18:41 crc kubenswrapper[4857]: Trace[904633674]: [10.001410981s] [10.001410981s] END Nov 28 13:18:41 crc kubenswrapper[4857]: E1128 13:18:41.881419 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Nov 28 13:18:41 crc kubenswrapper[4857]: W1128 13:18:41.916673 4857 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout Nov 28 13:18:41 crc kubenswrapper[4857]: I1128 13:18:41.916793 4857 trace.go:236] Trace[360643030]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 13:18:31.915) (total time: 10001ms): Nov 28 13:18:41 crc kubenswrapper[4857]: Trace[360643030]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (13:18:41.916) Nov 28 13:18:41 crc kubenswrapper[4857]: Trace[360643030]: [10.001464932s] [10.001464932s] END Nov 28 13:18:41 crc kubenswrapper[4857]: E1128 13:18:41.916814 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Nov 28 13:18:42 crc kubenswrapper[4857]: W1128 13:18:42.238284 4857 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout Nov 28 13:18:42 crc kubenswrapper[4857]: I1128 13:18:42.238426 4857 trace.go:236] Trace[185432980]: 
"Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 13:18:32.237) (total time: 10001ms): Nov 28 13:18:42 crc kubenswrapper[4857]: Trace[185432980]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (13:18:42.238) Nov 28 13:18:42 crc kubenswrapper[4857]: Trace[185432980]: [10.001218215s] [10.001218215s] END Nov 28 13:18:42 crc kubenswrapper[4857]: E1128 13:18:42.238460 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Nov 28 13:18:42 crc kubenswrapper[4857]: I1128 13:18:42.612696 4857 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 28 13:18:42 crc kubenswrapper[4857]: I1128 13:18:42.612792 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 28 13:18:42 crc kubenswrapper[4857]: I1128 13:18:42.624077 4857 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 28 13:18:42 crc kubenswrapper[4857]: I1128 13:18:42.624173 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 28 13:18:44 crc kubenswrapper[4857]: I1128 13:18:44.687941 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:44 crc kubenswrapper[4857]: I1128 13:18:44.690431 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:44 crc kubenswrapper[4857]: I1128 13:18:44.690486 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:44 crc kubenswrapper[4857]: I1128 13:18:44.690505 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:44 crc kubenswrapper[4857]: I1128 13:18:44.690542 4857 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 13:18:44 crc kubenswrapper[4857]: E1128 13:18:44.695895 4857 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Nov 28 13:18:45 crc kubenswrapper[4857]: I1128 13:18:45.104899 4857 reflector.go:368] 
Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 28 13:18:45 crc kubenswrapper[4857]: I1128 13:18:45.717380 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:18:45 crc kubenswrapper[4857]: I1128 13:18:45.717610 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:45 crc kubenswrapper[4857]: I1128 13:18:45.719300 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:45 crc kubenswrapper[4857]: I1128 13:18:45.719367 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:45 crc kubenswrapper[4857]: I1128 13:18:45.719391 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:45 crc kubenswrapper[4857]: I1128 13:18:45.724534 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:18:46 crc kubenswrapper[4857]: I1128 13:18:46.419815 4857 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 13:18:46 crc kubenswrapper[4857]: I1128 13:18:46.419902 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:46 crc kubenswrapper[4857]: I1128 13:18:46.420995 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:46 crc kubenswrapper[4857]: I1128 13:18:46.421296 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:46 crc kubenswrapper[4857]: I1128 13:18:46.421430 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.107222 4857 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.217859 4857 apiserver.go:52] "Watching apiserver" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.220889 4857 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.221226 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf"] Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.221671 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.221682 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 13:18:47 crc kubenswrapper[4857]: E1128 13:18:47.221802 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.222767 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:18:47 crc kubenswrapper[4857]: E1128 13:18:47.222876 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.223732 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.223882 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.224043 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:18:47 crc kubenswrapper[4857]: E1128 13:18:47.224136 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.225647 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.226905 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.227236 4857 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.231114 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.231517 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.232356 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.232532 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.232666 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.232702 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.232986 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.249889 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.260094 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when 
the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.266070 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.268304 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.274387 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.290133 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.302110 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.311970 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.321360 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.332406 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.342219 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.361667 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025
-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b839750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef342
6e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.373493 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.383649 4857 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.392693 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.407219 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.419848 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.437842 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.620557 4857 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.635462 4857 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.655183 4857 csr.go:261] certificate signing request csr-gk7d8 is approved, waiting to be issued Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.665493 4857 csr.go:257] certificate signing request csr-gk7d8 is issued Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.689064 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.695381 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.708970 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.721447 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.721520 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.721555 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.721578 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.721602 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.721625 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.723406 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.723898 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" 
(UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.723983 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.724051 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.724145 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.724237 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.724324 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.722066 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.722152 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.722269 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.722265 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.722316 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.722833 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.724406 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.724418 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.724599 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.724635 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.724668 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.724705 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.724742 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: 
\"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.724803 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.724836 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.724865 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.724900 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.724935 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.724960 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.724971 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725053 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725059 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725094 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725128 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725163 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725199 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725236 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725270 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725308 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725345 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725305 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725377 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725410 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725443 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725475 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725511 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725548 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725584 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725618 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725649 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725683 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725718 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725777 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725835 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725871 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725905 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725940 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726014 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726060 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726098 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726131 
4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726166 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726201 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726236 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726267 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726301 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726333 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726363 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726395 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726428 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726463 4857 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726497 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726531 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726564 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726600 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725412 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726563 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725447 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725657 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726651 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726670 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725773 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725816 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725776 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.725868 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726060 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726083 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726240 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726255 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726788 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726264 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726416 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726604 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726996 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.727011 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.727034 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.727051 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.727247 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.727308 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.727335 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.726631 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728170 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728201 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728207 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728232 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728263 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728296 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728251 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728336 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728365 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728390 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728414 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728437 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728461 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728483 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728507 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728530 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728552 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728574 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728576 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728597 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728621 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728643 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728667 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728699 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728707 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728722 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728746 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728788 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728812 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728836 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728861 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728886 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728912 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728936 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728963 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728985 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.728997 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729031 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729056 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729096 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729144 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729299 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729292 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729338 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729377 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729421 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729455 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729488 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729528 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729536 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729609 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729615 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729641 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729725 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729746 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729782 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729802 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729827 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729890 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729917 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729938 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729956 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") 
pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.729996 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730013 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730032 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730052 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730051 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730069 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730089 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730106 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730124 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730144 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730163 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730184 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730204 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730223 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730240 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod 
\"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730258 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730279 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730300 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730319 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730337 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730355 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730374 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730391 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730403 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730408 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730460 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730488 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730513 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730538 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730565 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730597 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730672 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730700 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730727 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod 
\"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730777 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730806 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730832 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730857 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730882 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730918 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730943 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.730972 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.731017 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.731045 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.731071 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.731079 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.731101 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.731128 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.731155 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.731180 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.731203 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.731227 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: 
\"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.731252 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.731275 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.731297 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.731322 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.731346 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.731369 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.731394 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.731428 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.731437 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.731448 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.731521 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.731857 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.732052 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.732076 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.732195 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.732369 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.732632 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.732855 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.733069 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.733174 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.733268 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.731452 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.733490 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.733531 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.733556 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.733580 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.733712 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.733745 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.733789 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.733814 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.733838 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: 
\"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.733867 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.733891 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.733914 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.733935 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.733955 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.733990 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.734019 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.734100 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.734143 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 
13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.734165 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.734193 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.734216 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.734240 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.734258 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.734278 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.734304 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.734327 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.734346 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" 
(UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.734369 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.734391 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.734504 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.734726 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.734975 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.735573 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.736280 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.736349 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.736924 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.737165 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.737172 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.734412 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.738226 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.739187 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.739242 4857 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.739258 4857 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.739276 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.739296 4857 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.739332 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.740773 4857 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.740803 4857 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.740818 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.740836 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 
13:18:47.740854 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.740867 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.740936 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.740951 4857 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.740973 4857 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.740986 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741001 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741015 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741028 4857 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741042 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741054 4857 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741065 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741078 4857 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc 
kubenswrapper[4857]: I1128 13:18:47.741090 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741101 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741112 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741123 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741134 4857 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741147 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741158 4857 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741169 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741181 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741192 4857 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741202 4857 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741213 4857 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741225 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" 
DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741235 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741247 4857 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741258 4857 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741269 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741283 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741299 4857 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741310 4857 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741322 4857 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741334 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741347 4857 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741358 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741369 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741380 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741390 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741400 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741411 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741421 4857 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741433 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741445 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741458 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741472 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741483 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741494 4857 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741504 4857 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741515 4857 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741525 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath 
\"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741536 4857 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741546 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741558 4857 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741569 4857 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741579 4857 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741593 4857 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.738463 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.738494 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.738840 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.738932 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.739102 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.739035 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.739079 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.739364 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.739571 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.739850 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.740024 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.740327 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.740416 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.740531 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.740954 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741277 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741543 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741693 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741880 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.741960 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.742114 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.742123 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.739251 4857 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.742351 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.742502 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.742509 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: E1128 13:18:47.742793 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:18:48.242765341 +0000 UTC m=+20.270140508 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.742798 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.742917 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.743083 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.743118 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.743172 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.743346 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.743435 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.743558 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.743572 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.743745 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.743955 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.744097 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.744830 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.744979 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.745088 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.745275 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.745563 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.745891 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.745911 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.746063 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.746397 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: E1128 13:18:47.746875 4857 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:18:47 crc kubenswrapper[4857]: E1128 13:18:47.746952 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:18:48.24693362 +0000 UTC m=+20.274308787 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.746918 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.748021 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.750267 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.751042 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: E1128 13:18:47.752152 4857 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:18:47 crc kubenswrapper[4857]: E1128 13:18:47.752364 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:18:48.252316804 +0000 UTC m=+20.279692171 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.754233 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.754432 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.754542 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.754628 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.755049 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.755138 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.755460 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.755490 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.756236 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.756323 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.756732 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.756869 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.757176 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.756929 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.758678 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.764042 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.764283 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.764191 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.765323 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.765904 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.765956 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.765993 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.766366 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.766494 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.766634 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.766670 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.768216 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.768851 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.768865 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.769055 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.769075 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.770049 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.770358 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.770551 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.770626 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: E1128 13:18:47.770702 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:18:47 crc kubenswrapper[4857]: E1128 13:18:47.770731 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:18:47 crc kubenswrapper[4857]: E1128 13:18:47.770781 4857 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.770829 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: E1128 13:18:47.770877 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 13:18:48.270846393 +0000 UTC m=+20.298221590 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.771112 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.771951 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: E1128 13:18:47.775007 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:18:47 crc kubenswrapper[4857]: E1128 13:18:47.775033 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:18:47 crc kubenswrapper[4857]: E1128 13:18:47.775049 4857 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:18:47 crc kubenswrapper[4857]: E1128 13:18:47.775185 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 13:18:48.275163247 +0000 UTC m=+20.302538424 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.776288 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.776711 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.778234 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.778263 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.780087 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.780763 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.782663 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.783550 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.783849 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.784009 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.784531 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.784652 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.786959 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.787829 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.788338 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.788425 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.788645 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.788695 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.789036 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.789106 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.789166 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.790276 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.793957 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.794417 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.794645 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.794950 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.795259 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.795424 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.795916 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.796149 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.796593 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.796841 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.796943 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.797438 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.798203 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.799092 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.804073 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90
092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b839750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/op
enshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.806306 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.809387 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.809491 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.810672 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.812621 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.814761 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.815512 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.815863 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.816076 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.816101 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.816378 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.823581 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.829442 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.834470 4857 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.838192 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.842986 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843043 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843119 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843134 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843146 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843156 4857 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843168 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843181 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843192 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" 
(UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843202 4857 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843211 4857 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843221 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843230 4857 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843241 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843252 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843262 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843272 4857 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843282 4857 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843293 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843304 4857 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843314 4857 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843324 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843334 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843345 4857 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843358 4857 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843369 4857 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843380 4857 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843392 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843384 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843404 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843474 4857 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843487 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843497 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843506 4857 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 
13:18:47.843517 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843528 4857 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843547 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843560 4857 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844062 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844077 4857 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844091 4857 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844102 4857 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844114 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.843448 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844126 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844176 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844204 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: 
\"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844213 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844222 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844231 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844241 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844249 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844258 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844266 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844274 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844282 4857 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844290 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844298 4857 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844305 4857 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844314 4857 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: 
\"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844322 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844330 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844339 4857 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844348 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844356 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844366 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844374 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844383 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844390 4857 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844430 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844439 4857 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844447 4857 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844455 4857 reconciler_common.go:293] "Volume detached for volume 
\"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844464 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844471 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844479 4857 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844487 4857 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844494 4857 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844504 4857 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844512 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844520 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844528 4857 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844536 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844545 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844554 4857 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844562 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844571 4857 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844578 4857 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844586 4857 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844594 4857 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844603 4857 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844611 4857 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844618 4857 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844626 4857 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844633 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844641 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844649 4857 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844659 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844671 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" 
DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844680 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844688 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844697 4857 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844705 4857 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844713 4857 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844720 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844728 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844737 4857 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844746 4857 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844781 4857 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844793 4857 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844804 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844816 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" 
DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844827 4857 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844837 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844847 4857 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844856 4857 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844865 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844874 4857 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844883 4857 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844890 4857 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844898 4857 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844905 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844913 4857 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844921 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844929 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844937 4857 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844945 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.844954 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.846486 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.849158 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.855568 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.860492 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.875191 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.884027 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.896386 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.904519 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.912917 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.929653 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025
-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b839750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef342
6e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:47 crc kubenswrapper[4857]: I1128 13:18:47.951333 4857 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.105418 4857 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:49070->192.168.126.11:17697: read: connection reset by peer" start-of-body= Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.105443 4857 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 
192.168.126.11:33338->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.105480 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:49070->192.168.126.11:17697: read: connection reset by peer"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.105519 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:33338->192.168.126.11:17697: read: connection reset by peer"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.111239 4857 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.111709 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.133099 4857 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials"
Nov 28 13:18:48 crc kubenswrapper[4857]: W1128 13:18:48.133374 4857 reflector.go:484] object-"openshift-network-operator"/"metrics-tls": watch of *v1.Secret ended with: very short watch: object-"openshift-network-operator"/"metrics-tls": Unexpected watch close - watch lasted less than a second and no items received
Nov 28 13:18:48 crc kubenswrapper[4857]: W1128 13:18:48.133423 4857 reflector.go:484] object-"openshift-network-operator"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-operator"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received
Nov 28 13:18:48 crc kubenswrapper[4857]: W1128 13:18:48.133474 4857 reflector.go:484] object-"openshift-network-node-identity"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-node-identity"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received
Nov 28 13:18:48 crc kubenswrapper[4857]: W1128 13:18:48.133374 4857 reflector.go:484] object-"openshift-network-node-identity"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-node-identity"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received
Nov 28 13:18:48 crc kubenswrapper[4857]: W1128 13:18:48.133553 4857 reflector.go:484] k8s.io/client-go/informers/factory.go:160: watch of *v1.Service ended with: very short watch: k8s.io/client-go/informers/factory.go:160: Unexpected watch close - watch lasted less than a second and no items received
Nov 28 13:18:48 crc kubenswrapper[4857]: W1128 13:18:48.133576 4857 reflector.go:484] object-"openshift-network-operator"/"iptables-alerter-script": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-operator"/"iptables-alerter-script": Unexpected watch close - watch lasted less than a second and no items received
Nov 28 13:18:48 crc kubenswrapper[4857]: W1128 13:18:48.133600 4857 reflector.go:484] k8s.io/client-go/informers/factory.go:160: watch of *v1.RuntimeClass ended with: very short watch: k8s.io/client-go/informers/factory.go:160: Unexpected watch close - watch lasted less than a second and no items received
Nov 28 13:18:48 crc kubenswrapper[4857]: W1128 13:18:48.133604 4857 reflector.go:484] object-"openshift-network-node-identity"/"env-overrides": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-node-identity"/"env-overrides": Unexpected watch close - watch lasted less than a second and no items received
Nov 28 13:18:48 crc kubenswrapper[4857]: E1128 13:18:48.133565 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Post \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases?timeout=10s\": read tcp 38.102.83.227:34724->38.102.83.227:6443: use of closed network connection" interval="6.4s"
Nov 28 13:18:48 crc kubenswrapper[4857]: W1128 13:18:48.133628 4857 reflector.go:484] object-"openshift-network-operator"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-operator"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received
Nov 28 13:18:48 crc kubenswrapper[4857]: W1128 13:18:48.133633 4857 reflector.go:484] object-"openshift-network-node-identity"/"network-node-identity-cert": watch of *v1.Secret ended with: very short watch: object-"openshift-network-node-identity"/"network-node-identity-cert": Unexpected watch close - watch lasted less than a second and no items received
Nov 28 13:18:48 crc kubenswrapper[4857]: E1128 13:18:48.133546 4857 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/events\": read tcp 38.102.83.227:34724->38.102.83.227:6443: use of closed network connection" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.187c2e29440c3318 openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:d1b160f5dda77d281dd8e69ec8d817f9,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 13:18:28.79257884 +0000 UTC m=+0.819954047,LastTimestamp:2025-11-28 13:18:28.79257884 +0000 UTC m=+0.819954047,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 28 13:18:48 crc kubenswrapper[4857]: W1128 13:18:48.133879 4857 reflector.go:484] object-"openshift-network-node-identity"/"ovnkube-identity-cm": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-node-identity"/"ovnkube-identity-cm": Unexpected watch close - watch lasted less than a second and no items received
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.139400 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.254170 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.254243 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.254272 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:18:48 crc kubenswrapper[4857]: E1128 13:18:48.254369 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:18:49.254343224 +0000 UTC m=+21.281718391 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 13:18:48 crc kubenswrapper[4857]: E1128 13:18:48.254385 4857 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 28 13:18:48 crc kubenswrapper[4857]: E1128 13:18:48.254455 4857 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 28 13:18:48 crc kubenswrapper[4857]: E1128 13:18:48.254460 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:18:49.254447097 +0000 UTC m=+21.281822264 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 28 13:18:48 crc kubenswrapper[4857]: E1128 13:18:48.254515 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:18:49.254508309 +0000 UTC m=+21.281883476 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.312914 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.313767 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.315031 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.315813 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.316441 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.317511 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.318174 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.319141 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.319847 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.320843 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.321435 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.322107 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.325259 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.327011 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.330380 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.330936 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.333197 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.334134 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.334646 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.336108 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.336728 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.337215 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.338417 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.338952 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.339928 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.342273 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.342718 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.343892 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.344638 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.347976 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.348938 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.349461 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.349971 4857 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.350088 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes"
Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.351860 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" 
path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.353356 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.353898 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.355908 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.357086 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.357138 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:18:48 crc kubenswrapper[4857]: E1128 13:18:48.357278 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:18:48 crc kubenswrapper[4857]: E1128 13:18:48.357300 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:18:48 crc kubenswrapper[4857]: E1128 13:18:48.357312 4857 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:18:48 crc kubenswrapper[4857]: E1128 13:18:48.357357 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 13:18:49.357342938 +0000 UTC m=+21.384718095 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:18:48 crc kubenswrapper[4857]: E1128 13:18:48.357407 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:18:48 crc kubenswrapper[4857]: E1128 13:18:48.357417 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:18:48 crc kubenswrapper[4857]: E1128 13:18:48.357423 4857 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:18:48 crc kubenswrapper[4857]: E1128 13:18:48.357442 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 13:18:49.357435351 +0000 UTC m=+21.384810508 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.357641 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.360239 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.361169 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.362525 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.363200 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.367421 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.368745 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.370035 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.370637 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.371527 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.372129 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.373019 4857 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.373921 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.375708 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.376341 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.376924 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.378261 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.378981 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.379567 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.396318 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b83
9750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.415812 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.425253 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"0e7eb271140ca6d07ad5d5953198ed7e5019a7fc88eda721d390551a4f8b748c"} Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.425894 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.427249 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f"} Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.427311 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b"} Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.427325 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"965ee4cddbe22b614d30d5400358979cb5b890370e1cef6c705960063469f9cb"} Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.428508 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"d1c743b6ffaeb580ae5728920e8dce2a9e5e788482328698ab078e29fd9bb8cd"} Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.429954 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.431664 4857 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c" exitCode=255 Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.431741 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c"} Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.439207 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.449051 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.449632 4857 scope.go:117] "RemoveContainer" containerID="8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.449783 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.458990 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.482288 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025
-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b839750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef342
6e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.499856 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.508623 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.523116 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{
\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.537323 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.547460 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPat
h\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.558615 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.666898 4857 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-11-28 13:13:47 +0000 UTC, rotation deadline is 2026-09-06 13:50:35.141717617 +0000 UTC Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.666964 4857 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 6768h31m46.474757518s for next certificate rotation Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.975221 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-9f578"] Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.975649 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-9f578" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.977970 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.978233 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 28 13:18:48 crc kubenswrapper[4857]: I1128 13:18:48.978685 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.001993 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c80
8fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b839750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/op
enshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.012173 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.016623 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28
T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.030367 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.036858 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.045174 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.060552 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.062833 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/d49db47f-8c30-4756-92d5-2ae0be0c8f84-hosts-file\") pod \"node-resolver-9f578\" (UID: \"d49db47f-8c30-4756-92d5-2ae0be0c8f84\") " pod="openshift-dns/node-resolver-9f578" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.062902 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6kn8d\" (UniqueName: \"kubernetes.io/projected/d49db47f-8c30-4756-92d5-2ae0be0c8f84-kube-api-access-6kn8d\") pod \"node-resolver-9f578\" (UID: \"d49db47f-8c30-4756-92d5-2ae0be0c8f84\") " pod="openshift-dns/node-resolver-9f578" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.079155 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.100635 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.112586 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.123486 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025
-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.135102 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.163882 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6kn8d\" (UniqueName: \"kubernetes.io/projected/d49db47f-8c30-4756-92d5-2ae0be0c8f84-kube-api-access-6kn8d\") pod \"node-resolver-9f578\" (UID: \"d49db47f-8c30-4756-92d5-2ae0be0c8f84\") " pod="openshift-dns/node-resolver-9f578" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.163952 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/d49db47f-8c30-4756-92d5-2ae0be0c8f84-hosts-file\") pod \"node-resolver-9f578\" (UID: \"d49db47f-8c30-4756-92d5-2ae0be0c8f84\") " pod="openshift-dns/node-resolver-9f578" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.164032 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/d49db47f-8c30-4756-92d5-2ae0be0c8f84-hosts-file\") pod \"node-resolver-9f578\" (UID: \"d49db47f-8c30-4756-92d5-2ae0be0c8f84\") " pod="openshift-dns/node-resolver-9f578" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.186871 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6kn8d\" (UniqueName: \"kubernetes.io/projected/d49db47f-8c30-4756-92d5-2ae0be0c8f84-kube-api-access-6kn8d\") pod \"node-resolver-9f578\" (UID: \"d49db47f-8c30-4756-92d5-2ae0be0c8f84\") " pod="openshift-dns/node-resolver-9f578" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.200489 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.264214 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.264283 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: 
\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.264327 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:18:49 crc kubenswrapper[4857]: E1128 13:18:49.264413 4857 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:18:49 crc kubenswrapper[4857]: E1128 13:18:49.264434 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:18:51.264401065 +0000 UTC m=+23.291776232 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:18:49 crc kubenswrapper[4857]: E1128 13:18:49.264486 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:18:51.264461267 +0000 UTC m=+23.291836434 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:18:49 crc kubenswrapper[4857]: E1128 13:18:49.264486 4857 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:18:49 crc kubenswrapper[4857]: E1128 13:18:49.264573 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:18:51.26455641 +0000 UTC m=+23.291931577 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.291369 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-9f578" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.299359 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 28 13:18:49 crc kubenswrapper[4857]: W1128 13:18:49.307213 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd49db47f_8c30_4756_92d5_2ae0be0c8f84.slice/crio-2c7e2b405294a14ac2b31dded40a541a3105f8f15724bd2556ec2eff8c4c0a3d WatchSource:0}: Error finding container 2c7e2b405294a14ac2b31dded40a541a3105f8f15724bd2556ec2eff8c4c0a3d: Status 404 returned error can't find the container with id 2c7e2b405294a14ac2b31dded40a541a3105f8f15724bd2556ec2eff8c4c0a3d Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.308712 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:18:49 crc kubenswrapper[4857]: E1128 13:18:49.308880 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.309079 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.309144 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:18:49 crc kubenswrapper[4857]: E1128 13:18:49.309281 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:18:49 crc kubenswrapper[4857]: E1128 13:18:49.309157 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.357938 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-jdgls"] Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.358417 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-27d6k"] Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.358654 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.359536 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-27d6k" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.361799 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.361811 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.361820 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.362210 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.362235 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.363186 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.364719 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.364790 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:18:49 crc kubenswrapper[4857]: E1128 13:18:49.364880 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:18:49 crc kubenswrapper[4857]: E1128 13:18:49.364906 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:18:49 crc kubenswrapper[4857]: E1128 13:18:49.364918 4857 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:18:49 crc kubenswrapper[4857]: E1128 13:18:49.364946 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:18:49 crc kubenswrapper[4857]: E1128 13:18:49.364966 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:18:49 crc kubenswrapper[4857]: E1128 13:18:49.364975 4857 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 13:18:51.364958989 +0000 UTC m=+23.392334156 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:18:49 crc kubenswrapper[4857]: E1128 13:18:49.364979 4857 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:18:49 crc kubenswrapper[4857]: E1128 13:18:49.365026 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 13:18:51.365017751 +0000 UTC m=+23.392392918 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.370900 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.371125 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.371937 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.372621 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.372621 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-w25ss"] Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.373519 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-tzg2g"] Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.373840 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.374060 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.382045 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.382300 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.382319 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.382614 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.383450 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.383627 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.383837 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.385643 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/
ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.386366 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.386503 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.420233 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.440541 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.442700 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.444270 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.444274 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b"} Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.444668 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.446393 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b"} Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.450694 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-9f578" event={"ID":"d49db47f-8c30-4756-92d5-2ae0be0c8f84","Type":"ContainerStarted","Data":"2c7e2b405294a14ac2b31dded40a541a3105f8f15724bd2556ec2eff8c4c0a3d"} Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.462964 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.465526 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-run-ovn\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.465575 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: 
\"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-multus-cni-dir\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.465605 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-host-var-lib-kubelet\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.465656 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/bf74e995-2208-43c6-b89d-10318f55cda8-ovnkube-script-lib\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.465687 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-run-openvswitch\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.465711 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/bf74e995-2208-43c6-b89d-10318f55cda8-ovnkube-config\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.465732 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-host-run-k8s-cni-cncf-io\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.465806 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/1031bdc4-d6c6-4425-805b-506069f5667d-multus-daemon-config\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.465827 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-run-netns\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.465848 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-multus-socket-dir-parent\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.465871 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-host-var-lib-cni-bin\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.465891 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-hostroot\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.465914 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdp2n\" (UniqueName: \"kubernetes.io/projected/1031bdc4-d6c6-4425-805b-506069f5667d-kube-api-access-jdp2n\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.465939 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gc5bn\" (UniqueName: \"kubernetes.io/projected/aba2e99a-c0de-4ae5-b347-de1565fd9d68-kube-api-access-gc5bn\") pod \"machine-config-daemon-jdgls\" (UID: \"aba2e99a-c0de-4ae5-b347-de1565fd9d68\") " pod="openshift-machine-config-operator/machine-config-daemon-jdgls" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.465963 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-system-cni-dir\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.465986 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-host-var-lib-cni-multus\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466015 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-run-systemd\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466039 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/bf74e995-2208-43c6-b89d-10318f55cda8-ovn-node-metrics-cert\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466065 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/c9dfc021-dc50-485f-a833-e048ab7a390c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-27d6k\" (UID: \"c9dfc021-dc50-485f-a833-e048ab7a390c\") " pod="openshift-multus/multus-additional-cni-plugins-27d6k" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466088 
4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-slash\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466108 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/c9dfc021-dc50-485f-a833-e048ab7a390c-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-27d6k\" (UID: \"c9dfc021-dc50-485f-a833-e048ab7a390c\") " pod="openshift-multus/multus-additional-cni-plugins-27d6k" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466141 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466167 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-os-release\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466190 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-kubelet\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466212 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nc22\" (UniqueName: \"kubernetes.io/projected/bf74e995-2208-43c6-b89d-10318f55cda8-kube-api-access-7nc22\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466232 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c9dfc021-dc50-485f-a833-e048ab7a390c-system-cni-dir\") pod \"multus-additional-cni-plugins-27d6k\" (UID: \"c9dfc021-dc50-485f-a833-e048ab7a390c\") " pod="openshift-multus/multus-additional-cni-plugins-27d6k" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466252 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/aba2e99a-c0de-4ae5-b347-de1565fd9d68-proxy-tls\") pod \"machine-config-daemon-jdgls\" (UID: \"aba2e99a-c0de-4ae5-b347-de1565fd9d68\") " pod="openshift-machine-config-operator/machine-config-daemon-jdgls" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466273 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-systemd-units\") pod 
\"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466293 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/c9dfc021-dc50-485f-a833-e048ab7a390c-os-release\") pod \"multus-additional-cni-plugins-27d6k\" (UID: \"c9dfc021-dc50-485f-a833-e048ab7a390c\") " pod="openshift-multus/multus-additional-cni-plugins-27d6k" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466312 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-etc-openvswitch\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466330 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-node-log\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466349 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/bf74e995-2208-43c6-b89d-10318f55cda8-env-overrides\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466370 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlnph\" (UniqueName: \"kubernetes.io/projected/c9dfc021-dc50-485f-a833-e048ab7a390c-kube-api-access-hlnph\") pod \"multus-additional-cni-plugins-27d6k\" (UID: \"c9dfc021-dc50-485f-a833-e048ab7a390c\") " pod="openshift-multus/multus-additional-cni-plugins-27d6k" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466397 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/aba2e99a-c0de-4ae5-b347-de1565fd9d68-mcd-auth-proxy-config\") pod \"machine-config-daemon-jdgls\" (UID: \"aba2e99a-c0de-4ae5-b347-de1565fd9d68\") " pod="openshift-machine-config-operator/machine-config-daemon-jdgls" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466420 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-var-lib-openvswitch\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466441 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/1031bdc4-d6c6-4425-805b-506069f5667d-cni-binary-copy\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466461 4857 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-multus-conf-dir\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466482 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-host-run-multus-certs\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466505 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-etc-kubernetes\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466526 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/aba2e99a-c0de-4ae5-b347-de1565fd9d68-rootfs\") pod \"machine-config-daemon-jdgls\" (UID: \"aba2e99a-c0de-4ae5-b347-de1565fd9d68\") " pod="openshift-machine-config-operator/machine-config-daemon-jdgls" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466546 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-log-socket\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466566 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-run-ovn-kubernetes\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466634 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-cni-bin\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466675 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-cni-netd\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466704 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/c9dfc021-dc50-485f-a833-e048ab7a390c-cni-binary-copy\") pod \"multus-additional-cni-plugins-27d6k\" (UID: \"c9dfc021-dc50-485f-a833-e048ab7a390c\") " pod="openshift-multus/multus-additional-cni-plugins-27d6k" Nov 28 13:18:49 crc 
kubenswrapper[4857]: I1128 13:18:49.466731 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-cnibin\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466791 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/c9dfc021-dc50-485f-a833-e048ab7a390c-cnibin\") pod \"multus-additional-cni-plugins-27d6k\" (UID: \"c9dfc021-dc50-485f-a833-e048ab7a390c\") " pod="openshift-multus/multus-additional-cni-plugins-27d6k" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.466815 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-host-run-netns\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.487821 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.531613 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.564134 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.564173 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.567849 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c9dfc021-dc50-485f-a833-e048ab7a390c-system-cni-dir\") pod \"multus-additional-cni-plugins-27d6k\" (UID: \"c9dfc021-dc50-485f-a833-e048ab7a390c\") " pod="openshift-multus/multus-additional-cni-plugins-27d6k" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.567882 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-os-release\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.567901 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-kubelet\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.567921 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nc22\" (UniqueName: \"kubernetes.io/projected/bf74e995-2208-43c6-b89d-10318f55cda8-kube-api-access-7nc22\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.567937 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/aba2e99a-c0de-4ae5-b347-de1565fd9d68-proxy-tls\") pod \"machine-config-daemon-jdgls\" (UID: \"aba2e99a-c0de-4ae5-b347-de1565fd9d68\") " pod="openshift-machine-config-operator/machine-config-daemon-jdgls" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.567955 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-systemd-units\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.567970 4857 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/c9dfc021-dc50-485f-a833-e048ab7a390c-os-release\") pod \"multus-additional-cni-plugins-27d6k\" (UID: \"c9dfc021-dc50-485f-a833-e048ab7a390c\") " pod="openshift-multus/multus-additional-cni-plugins-27d6k" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.567987 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-var-lib-openvswitch\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568002 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-etc-openvswitch\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568017 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-node-log\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.567994 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-kubelet\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568076 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-etc-openvswitch\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568103 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-node-log\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568102 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-systemd-units\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568127 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-var-lib-openvswitch\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568033 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: 
\"kubernetes.io/configmap/bf74e995-2208-43c6-b89d-10318f55cda8-env-overrides\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568163 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlnph\" (UniqueName: \"kubernetes.io/projected/c9dfc021-dc50-485f-a833-e048ab7a390c-kube-api-access-hlnph\") pod \"multus-additional-cni-plugins-27d6k\" (UID: \"c9dfc021-dc50-485f-a833-e048ab7a390c\") " pod="openshift-multus/multus-additional-cni-plugins-27d6k" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568268 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/c9dfc021-dc50-485f-a833-e048ab7a390c-os-release\") pod \"multus-additional-cni-plugins-27d6k\" (UID: \"c9dfc021-dc50-485f-a833-e048ab7a390c\") " pod="openshift-multus/multus-additional-cni-plugins-27d6k" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568298 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/aba2e99a-c0de-4ae5-b347-de1565fd9d68-mcd-auth-proxy-config\") pod \"machine-config-daemon-jdgls\" (UID: \"aba2e99a-c0de-4ae5-b347-de1565fd9d68\") " pod="openshift-machine-config-operator/machine-config-daemon-jdgls" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568334 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-etc-kubernetes\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568352 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/1031bdc4-d6c6-4425-805b-506069f5667d-cni-binary-copy\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568366 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-multus-conf-dir\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568382 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-host-run-multus-certs\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568388 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-etc-kubernetes\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568403 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/aba2e99a-c0de-4ae5-b347-de1565fd9d68-rootfs\") pod 
\"machine-config-daemon-jdgls\" (UID: \"aba2e99a-c0de-4ae5-b347-de1565fd9d68\") " pod="openshift-machine-config-operator/machine-config-daemon-jdgls" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568421 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-log-socket\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568438 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-run-ovn-kubernetes\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568467 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-host-run-multus-certs\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568480 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-cnibin\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568502 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-multus-conf-dir\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568528 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-cni-bin\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568535 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/aba2e99a-c0de-4ae5-b347-de1565fd9d68-rootfs\") pod \"machine-config-daemon-jdgls\" (UID: \"aba2e99a-c0de-4ae5-b347-de1565fd9d68\") " pod="openshift-machine-config-operator/machine-config-daemon-jdgls" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568515 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-cnibin\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568587 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-os-release\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568601 4857 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-log-socket\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568627 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-cni-bin\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568627 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-run-ovn-kubernetes\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568669 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-cni-netd\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568704 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/c9dfc021-dc50-485f-a833-e048ab7a390c-cni-binary-copy\") pod \"multus-additional-cni-plugins-27d6k\" (UID: \"c9dfc021-dc50-485f-a833-e048ab7a390c\") " pod="openshift-multus/multus-additional-cni-plugins-27d6k" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568725 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-cni-netd\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568779 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/bf74e995-2208-43c6-b89d-10318f55cda8-env-overrides\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568864 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c9dfc021-dc50-485f-a833-e048ab7a390c-system-cni-dir\") pod \"multus-additional-cni-plugins-27d6k\" (UID: \"c9dfc021-dc50-485f-a833-e048ab7a390c\") " pod="openshift-multus/multus-additional-cni-plugins-27d6k" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568867 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/c9dfc021-dc50-485f-a833-e048ab7a390c-cnibin\") pod \"multus-additional-cni-plugins-27d6k\" (UID: \"c9dfc021-dc50-485f-a833-e048ab7a390c\") " pod="openshift-multus/multus-additional-cni-plugins-27d6k" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568902 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: 
\"kubernetes.io/host-path/c9dfc021-dc50-485f-a833-e048ab7a390c-cnibin\") pod \"multus-additional-cni-plugins-27d6k\" (UID: \"c9dfc021-dc50-485f-a833-e048ab7a390c\") " pod="openshift-multus/multus-additional-cni-plugins-27d6k" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568961 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-host-run-netns\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568988 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-host-run-netns\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.568994 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-run-ovn\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.569032 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/bf74e995-2208-43c6-b89d-10318f55cda8-ovnkube-script-lib\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.569040 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/aba2e99a-c0de-4ae5-b347-de1565fd9d68-mcd-auth-proxy-config\") pod \"machine-config-daemon-jdgls\" (UID: \"aba2e99a-c0de-4ae5-b347-de1565fd9d68\") " pod="openshift-machine-config-operator/machine-config-daemon-jdgls" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.569049 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/1031bdc4-d6c6-4425-805b-506069f5667d-cni-binary-copy\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.569059 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-multus-cni-dir\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.569042 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-run-ovn\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.569126 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-host-var-lib-kubelet\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " 
pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.569167 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-run-openvswitch\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.569181 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-host-var-lib-kubelet\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.569399 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-run-openvswitch\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.569739 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-multus-cni-dir\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.569786 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/c9dfc021-dc50-485f-a833-e048ab7a390c-cni-binary-copy\") pod \"multus-additional-cni-plugins-27d6k\" (UID: \"c9dfc021-dc50-485f-a833-e048ab7a390c\") " pod="openshift-multus/multus-additional-cni-plugins-27d6k" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.569793 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/bf74e995-2208-43c6-b89d-10318f55cda8-ovnkube-config\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.569864 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-host-run-k8s-cni-cncf-io\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.569903 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-host-var-lib-cni-bin\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.569934 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/1031bdc4-d6c6-4425-805b-506069f5667d-multus-daemon-config\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.569964 4857 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-run-netns\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.570033 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-multus-socket-dir-parent\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.570069 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-system-cni-dir\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.570078 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-host-var-lib-cni-bin\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.569820 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/bf74e995-2208-43c6-b89d-10318f55cda8-ovnkube-script-lib\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.570102 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-hostroot\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.570136 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-hostroot\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.570158 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdp2n\" (UniqueName: \"kubernetes.io/projected/1031bdc4-d6c6-4425-805b-506069f5667d-kube-api-access-jdp2n\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.570190 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gc5bn\" (UniqueName: \"kubernetes.io/projected/aba2e99a-c0de-4ae5-b347-de1565fd9d68-kube-api-access-gc5bn\") pod \"machine-config-daemon-jdgls\" (UID: \"aba2e99a-c0de-4ae5-b347-de1565fd9d68\") " pod="openshift-machine-config-operator/machine-config-daemon-jdgls" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.570222 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: 
\"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-host-var-lib-cni-multus\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.570255 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-run-systemd\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.570284 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/bf74e995-2208-43c6-b89d-10318f55cda8-ovn-node-metrics-cert\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.570310 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/c9dfc021-dc50-485f-a833-e048ab7a390c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-27d6k\" (UID: \"c9dfc021-dc50-485f-a833-e048ab7a390c\") " pod="openshift-multus/multus-additional-cni-plugins-27d6k" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.570340 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-slash\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.570370 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/c9dfc021-dc50-485f-a833-e048ab7a390c-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-27d6k\" (UID: \"c9dfc021-dc50-485f-a833-e048ab7a390c\") " pod="openshift-multus/multus-additional-cni-plugins-27d6k" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.570399 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.570477 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-multus-socket-dir-parent\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.570037 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/bf74e995-2208-43c6-b89d-10318f55cda8-ovnkube-config\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.570509 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" 
(UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.570610 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-system-cni-dir\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.570644 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-host-run-k8s-cni-cncf-io\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.570737 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/1031bdc4-d6c6-4425-805b-506069f5667d-multus-daemon-config\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.570813 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-slash\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.571159 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/c9dfc021-dc50-485f-a833-e048ab7a390c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-27d6k\" (UID: \"c9dfc021-dc50-485f-a833-e048ab7a390c\") " pod="openshift-multus/multus-additional-cni-plugins-27d6k" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.571172 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/1031bdc4-d6c6-4425-805b-506069f5667d-host-var-lib-cni-multus\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.571225 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-run-systemd\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.571367 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/c9dfc021-dc50-485f-a833-e048ab7a390c-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-27d6k\" (UID: \"c9dfc021-dc50-485f-a833-e048ab7a390c\") " pod="openshift-multus/multus-additional-cni-plugins-27d6k" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.573983 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-run-netns\") pod \"ovnkube-node-w25ss\" 
(UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.579171 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/bf74e995-2208-43c6-b89d-10318f55cda8-ovn-node-metrics-cert\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.586165 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/aba2e99a-c0de-4ae5-b347-de1565fd9d68-proxy-tls\") pod \"machine-config-daemon-jdgls\" (UID: \"aba2e99a-c0de-4ae5-b347-de1565fd9d68\") " pod="openshift-machine-config-operator/machine-config-daemon-jdgls" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.590075 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.597188 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7nc22\" (UniqueName: \"kubernetes.io/projected/bf74e995-2208-43c6-b89d-10318f55cda8-kube-api-access-7nc22\") pod \"ovnkube-node-w25ss\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.600185 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gc5bn\" (UniqueName: \"kubernetes.io/projected/aba2e99a-c0de-4ae5-b347-de1565fd9d68-kube-api-access-gc5bn\") pod \"machine-config-daemon-jdgls\" (UID: \"aba2e99a-c0de-4ae5-b347-de1565fd9d68\") " pod="openshift-machine-config-operator/machine-config-daemon-jdgls" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.603269 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlnph\" (UniqueName: \"kubernetes.io/projected/c9dfc021-dc50-485f-a833-e048ab7a390c-kube-api-access-hlnph\") pod \"multus-additional-cni-plugins-27d6k\" (UID: \"c9dfc021-dc50-485f-a833-e048ab7a390c\") " pod="openshift-multus/multus-additional-cni-plugins-27d6k" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.603277 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdp2n\" (UniqueName: \"kubernetes.io/projected/1031bdc4-d6c6-4425-805b-506069f5667d-kube-api-access-jdp2n\") pod \"multus-tzg2g\" (UID: \"1031bdc4-d6c6-4425-805b-506069f5667d\") " pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.609685 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b83
9750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.619385 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.622658 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28
T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.632606 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.643721 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.657238 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.658126 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.675244 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.675510 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.687874 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-27d6k" Nov 28 13:18:49 crc kubenswrapper[4857]: W1128 13:18:49.699674 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc9dfc021_dc50_485f_a833_e048ab7a390c.slice/crio-ae564b149062e14c76c14037be5927756041635357213c8c608f518d5cf73a34 WatchSource:0}: Error finding container ae564b149062e14c76c14037be5927756041635357213c8c608f518d5cf73a34: Status 404 returned error can't find the container with id ae564b149062e14c76c14037be5927756041635357213c8c608f518d5cf73a34 Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.703383 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-tzg2g" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.705720 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.709475 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.727372 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.739342 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.752826 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.781924 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.803926 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.826827 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6
af06b839750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168
bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.845728 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.869025 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.887640 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.904102 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:49 crc kubenswrapper[4857]: I1128 13:18:49.924090 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.454698 4857 generic.go:334] "Generic (PLEG): container finished" podID="bf74e995-2208-43c6-b89d-10318f55cda8" containerID="6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809" exitCode=0 Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.454784 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerDied","Data":"6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809"} Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.455137 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerStarted","Data":"1f6a0725a20305bed641ca83f1d683236e70ef76e05daa6e5c7042edffd2ae57"} Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.458514 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-tzg2g" event={"ID":"1031bdc4-d6c6-4425-805b-506069f5667d","Type":"ContainerStarted","Data":"88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a"} Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.458577 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-tzg2g" event={"ID":"1031bdc4-d6c6-4425-805b-506069f5667d","Type":"ContainerStarted","Data":"9db5e73f57a671536af6d782f517e36fabd8bf48a0c94308830d78bd35a3f893"} Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.466607 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" event={"ID":"aba2e99a-c0de-4ae5-b347-de1565fd9d68","Type":"ContainerStarted","Data":"14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83"} Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.466647 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" event={"ID":"aba2e99a-c0de-4ae5-b347-de1565fd9d68","Type":"ContainerStarted","Data":"ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013"} Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.466658 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" event={"ID":"aba2e99a-c0de-4ae5-b347-de1565fd9d68","Type":"ContainerStarted","Data":"bf23b45fa8e7dbcdd8c63ab7e1cabfd8ecf378d746e681af0ae20b803ab77857"} Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.468639 4857 generic.go:334] "Generic (PLEG): container finished" podID="c9dfc021-dc50-485f-a833-e048ab7a390c" containerID="655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe" exitCode=0 Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.468709 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" event={"ID":"c9dfc021-dc50-485f-a833-e048ab7a390c","Type":"ContainerDied","Data":"655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe"} Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.468735 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" event={"ID":"c9dfc021-dc50-485f-a833-e048ab7a390c","Type":"ContainerStarted","Data":"ae564b149062e14c76c14037be5927756041635357213c8c608f518d5cf73a34"} Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.470653 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-9f578" event={"ID":"d49db47f-8c30-4756-92d5-2ae0be0c8f84","Type":"ContainerStarted","Data":"d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0"} Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.474955 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed 
to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource
-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.492849 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.507129 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.527727 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b83
9750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.553573 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z 
is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.575252 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.589011 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.610515 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.626828 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.666633 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.704112 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\
\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.715906 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.728554 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.741770 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.756004 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}
]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.777969 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.791329 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.802581 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.815894 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.831302 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-ku
bernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.884306 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.899200 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.919617 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc 
kubenswrapper[4857]: I1128 13:18:50.936697 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.952202 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.968777 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:50 crc kubenswrapper[4857]: I1128 13:18:50.990586 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b83
9750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.003828 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.097010 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.098995 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.099029 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.099040 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.099174 4857 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 13:18:51 crc 
kubenswrapper[4857]: I1128 13:18:51.106662 4857 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.106935 4857 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.109235 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.109325 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.109339 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.109385 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.109405 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:51Z","lastTransitionTime":"2025-11-28T13:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:51 crc kubenswrapper[4857]: E1128 13:18:51.126950 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.131936 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.131980 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.131989 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.132003 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.132013 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:51Z","lastTransitionTime":"2025-11-28T13:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:51 crc kubenswrapper[4857]: E1128 13:18:51.144826 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}], [images and nodeInfo omitted: byte-identical to the payload in the 13:18:51.162585 attempt below]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.149992 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.150031 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasNoDiskPressure" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.150041 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.150059 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.150071 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:51Z","lastTransitionTime":"2025-11-28T13:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:51 crc kubenswrapper[4857]: E1128 13:18:51.162585 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.166093 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.166155 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.166166 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.166183 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.166198 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:51Z","lastTransitionTime":"2025-11-28T13:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:51 crc kubenswrapper[4857]: E1128 13:18:51.179337 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.225977 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.226010 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.226019 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.226032 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.226041 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:51Z","lastTransitionTime":"2025-11-28T13:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:51 crc kubenswrapper[4857]: E1128 13:18:51.245554 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: E1128 13:18:51.246680 4857 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.254219 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.254294 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.254310 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.254332 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.254351 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:51Z","lastTransitionTime":"2025-11-28T13:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.292172 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.292300 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.292323 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:18:51 crc kubenswrapper[4857]: E1128 13:18:51.292441 4857 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:18:51 crc kubenswrapper[4857]: E1128 13:18:51.292485 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:18:55.292473156 +0000 UTC m=+27.319848313 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:18:51 crc kubenswrapper[4857]: E1128 13:18:51.292539 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:18:55.292532518 +0000 UTC m=+27.319907685 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:18:51 crc kubenswrapper[4857]: E1128 13:18:51.292582 4857 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:18:51 crc kubenswrapper[4857]: E1128 13:18:51.292601 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:18:55.29259546 +0000 UTC m=+27.319970617 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.303714 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-w8b2n"] Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.304412 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-w8b2n" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.305842 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.306646 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.307093 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.307285 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.309815 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.309868 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.309817 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:18:51 crc kubenswrapper[4857]: E1128 13:18:51.309999 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:18:51 crc kubenswrapper[4857]: E1128 13:18:51.309922 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:18:51 crc kubenswrapper[4857]: E1128 13:18:51.310092 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.322663 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc 
kubenswrapper[4857]: I1128 13:18:51.334791 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.353602 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.357928 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.357967 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.357977 4857 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.357994 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.358007 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:51Z","lastTransitionTime":"2025-11-28T13:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.368903 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.382237 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.393292 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/86b54d8d-03e5-4e53-906f-66060d30608d-host\") pod \"node-ca-w8b2n\" (UID: \"86b54d8d-03e5-4e53-906f-66060d30608d\") " pod="openshift-image-registry/node-ca-w8b2n" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.393451 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.393502 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/86b54d8d-03e5-4e53-906f-66060d30608d-serviceca\") pod \"node-ca-w8b2n\" (UID: \"86b54d8d-03e5-4e53-906f-66060d30608d\") " pod="openshift-image-registry/node-ca-w8b2n" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.393545 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.393606 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6wxd\" (UniqueName: \"kubernetes.io/projected/86b54d8d-03e5-4e53-906f-66060d30608d-kube-api-access-l6wxd\") pod \"node-ca-w8b2n\" (UID: \"86b54d8d-03e5-4e53-906f-66060d30608d\") " pod="openshift-image-registry/node-ca-w8b2n" Nov 28 13:18:51 crc kubenswrapper[4857]: E1128 13:18:51.393701 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:18:51 crc kubenswrapper[4857]: E1128 13:18:51.393739 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:18:51 crc kubenswrapper[4857]: E1128 13:18:51.393770 4857 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:18:51 crc kubenswrapper[4857]: E1128 13:18:51.393818 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:18:51 crc kubenswrapper[4857]: E1128 13:18:51.393868 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not 
registered Nov 28 13:18:51 crc kubenswrapper[4857]: E1128 13:18:51.393884 4857 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:18:51 crc kubenswrapper[4857]: E1128 13:18:51.393833 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 13:18:55.393810403 +0000 UTC m=+27.421185570 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:18:51 crc kubenswrapper[4857]: E1128 13:18:51.393990 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 13:18:55.393971138 +0000 UTC m=+27.421346475 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.408668 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b83
9750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.435141 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.448351 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.460363 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.460398 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.460408 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.460421 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.460432 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:51Z","lastTransitionTime":"2025-11-28T13:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.470408 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30
f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.475757 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294"} Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.479632 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerStarted","Data":"5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee"} Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.479690 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerStarted","Data":"ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439"} Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.479708 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerStarted","Data":"1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01"} Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.479720 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerStarted","Data":"0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c"} Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.482499 4857 generic.go:334] "Generic (PLEG): container finished" podID="c9dfc021-dc50-485f-a833-e048ab7a390c" containerID="327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011" exitCode=0 Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.482608 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" 
event={"ID":"c9dfc021-dc50-485f-a833-e048ab7a390c","Type":"ContainerDied","Data":"327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011"} Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.487642 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.494624 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6wxd\" (UniqueName: \"kubernetes.io/projected/86b54d8d-03e5-4e53-906f-66060d30608d-kube-api-access-l6wxd\") pod \"node-ca-w8b2n\" (UID: \"86b54d8d-03e5-4e53-906f-66060d30608d\") " pod="openshift-image-registry/node-ca-w8b2n" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.494668 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/86b54d8d-03e5-4e53-906f-66060d30608d-host\") pod \"node-ca-w8b2n\" (UID: \"86b54d8d-03e5-4e53-906f-66060d30608d\") " pod="openshift-image-registry/node-ca-w8b2n" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.494713 4857 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/86b54d8d-03e5-4e53-906f-66060d30608d-serviceca\") pod \"node-ca-w8b2n\" (UID: \"86b54d8d-03e5-4e53-906f-66060d30608d\") " pod="openshift-image-registry/node-ca-w8b2n" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.495238 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/86b54d8d-03e5-4e53-906f-66060d30608d-host\") pod \"node-ca-w8b2n\" (UID: \"86b54d8d-03e5-4e53-906f-66060d30608d\") " pod="openshift-image-registry/node-ca-w8b2n" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.495584 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/86b54d8d-03e5-4e53-906f-66060d30608d-serviceca\") pod \"node-ca-w8b2n\" (UID: \"86b54d8d-03e5-4e53-906f-66060d30608d\") " pod="openshift-image-registry/node-ca-w8b2n" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.501177 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.516731 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6wxd\" (UniqueName: \"kubernetes.io/projected/86b54d8d-03e5-4e53-906f-66060d30608d-kube-api-access-l6wxd\") pod \"node-ca-w8b2n\" (UID: \"86b54d8d-03e5-4e53-906f-66060d30608d\") " pod="openshift-image-registry/node-ca-w8b2n" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.519190 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"
Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.538369 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.550897 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.562374 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.562417 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.562428 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.562448 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.562477 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:51Z","lastTransitionTime":"2025-11-28T13:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.567596 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.587975 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b83
9750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.615101 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.635916 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.644670 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-w8b2n" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.655174 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.665372 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.665403 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.665411 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.665426 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.665435 4857 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:51Z","lastTransitionTime":"2025-11-28T13:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.667247 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.708892 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.759108 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z 
is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.767914 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.767961 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.767975 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.767993 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.768005 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:51Z","lastTransitionTime":"2025-11-28T13:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.793674 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.830921 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.868452 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.873103 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.873136 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.873148 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.873166 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.873179 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:51Z","lastTransitionTime":"2025-11-28T13:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.913094 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc 
kubenswrapper[4857]: I1128 13:18:51.948458 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.977106 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.977176 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.977194 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.977219 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.977236 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:51Z","lastTransitionTime":"2025-11-28T13:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:51 crc kubenswrapper[4857]: I1128 13:18:51.991095 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\
\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:51Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.030141 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.072521 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\
\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\
\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.079044 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.079100 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.079111 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.079127 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.079138 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:52Z","lastTransitionTime":"2025-11-28T13:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.182234 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.182271 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.182279 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.182294 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.182307 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:52Z","lastTransitionTime":"2025-11-28T13:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.284941 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.284977 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.284985 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.285000 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.285009 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:52Z","lastTransitionTime":"2025-11-28T13:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.387377 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.387518 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.387549 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.387577 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.387594 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:52Z","lastTransitionTime":"2025-11-28T13:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.489355 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerStarted","Data":"1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6"} Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.489384 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.489394 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerStarted","Data":"3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e"} Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.489401 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.489409 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.489421 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.489429 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:52Z","lastTransitionTime":"2025-11-28T13:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.492045 4857 generic.go:334] "Generic (PLEG): container finished" podID="c9dfc021-dc50-485f-a833-e048ab7a390c" containerID="691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73" exitCode=0 Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.492100 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" event={"ID":"c9dfc021-dc50-485f-a833-e048ab7a390c","Type":"ContainerDied","Data":"691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73"} Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.493871 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-w8b2n" event={"ID":"86b54d8d-03e5-4e53-906f-66060d30608d","Type":"ContainerStarted","Data":"1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7"} Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.493913 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-w8b2n" event={"ID":"86b54d8d-03e5-4e53-906f-66060d30608d","Type":"ContainerStarted","Data":"0b0ff5ca424cfb2e56feabf1eb48f0c3936e43cbab794e98b691f546f59e561f"} Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.516087 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\
\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.544367 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.560615 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.577569 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.590815 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.591327 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.591366 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.591379 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.591395 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.591408 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:52Z","lastTransitionTime":"2025-11-28T13:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.600528 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc 
kubenswrapper[4857]: I1128 13:18:52.613739 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.625690 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}
],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.641304 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.655589 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/open
shift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev
@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.673964 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b83
9750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.686240 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.694163 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.694214 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.694236 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.694254 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.694267 4857 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:52Z","lastTransitionTime":"2025-11-28T13:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.699044 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.709266 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.720646 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.734882 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.750299 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.797049 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.797104 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.797115 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.797131 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.797142 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:52Z","lastTransitionTime":"2025-11-28T13:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.803768 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b839750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.829828 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.874296 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.899197 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.899228 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.899237 4857 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.899251 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.899261 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:52Z","lastTransitionTime":"2025-11-28T13:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.913966 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.947873 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:52 crc kubenswrapper[4857]: I1128 13:18:52.988346 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:52Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.002441 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.002496 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.002515 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.002542 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.002561 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:53Z","lastTransitionTime":"2025-11-28T13:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.045209 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30
f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.074056 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.104998 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.105078 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.105096 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.105147 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.105167 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:53Z","lastTransitionTime":"2025-11-28T13:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.113075 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.150994 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.192565 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.208311 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.208370 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.208382 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.208400 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.208412 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:53Z","lastTransitionTime":"2025-11-28T13:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.231720 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.275322 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly
\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.308820 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.308891 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:18:53 crc kubenswrapper[4857]: E1128 13:18:53.308991 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:18:53 crc kubenswrapper[4857]: E1128 13:18:53.309076 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.308910 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:18:53 crc kubenswrapper[4857]: E1128 13:18:53.309201 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.310903 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.311056 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.311143 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.311223 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.311297 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:53Z","lastTransitionTime":"2025-11-28T13:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.414445 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.414505 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.414523 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.414549 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.414567 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:53Z","lastTransitionTime":"2025-11-28T13:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.501442 4857 generic.go:334] "Generic (PLEG): container finished" podID="c9dfc021-dc50-485f-a833-e048ab7a390c" containerID="dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4" exitCode=0 Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.501516 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" event={"ID":"c9dfc021-dc50-485f-a833-e048ab7a390c","Type":"ContainerDied","Data":"dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4"} Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.520995 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.521055 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.521072 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.521101 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.521119 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:53Z","lastTransitionTime":"2025-11-28T13:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.526366 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.541657 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.554708 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.566708 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.579695 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.593013 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.605135 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.622780 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\
\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":f
alse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.625394 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.625446 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.625465 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.625489 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.625509 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:53Z","lastTransitionTime":"2025-11-28T13:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.644965 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b839750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.672593 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.713053 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.727789 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.727848 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.727865 4857 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.727889 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.727905 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:53Z","lastTransitionTime":"2025-11-28T13:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.748046 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.787867 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.831121 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.832637 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.832685 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.832702 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.832727 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.832744 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:53Z","lastTransitionTime":"2025-11-28T13:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.874545 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30
f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.934964 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.935029 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.935052 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.935081 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:53 crc kubenswrapper[4857]: I1128 13:18:53.935107 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:53Z","lastTransitionTime":"2025-11-28T13:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.037530 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.037574 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.037584 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.037601 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.037612 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:54Z","lastTransitionTime":"2025-11-28T13:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.141047 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.141085 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.141094 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.141109 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.141122 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:54Z","lastTransitionTime":"2025-11-28T13:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.245553 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.245655 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.245682 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.245718 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.245742 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:54Z","lastTransitionTime":"2025-11-28T13:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.348225 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.348296 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.348319 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.348349 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.348371 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:54Z","lastTransitionTime":"2025-11-28T13:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.451219 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.451266 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.451277 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.451293 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.451305 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:54Z","lastTransitionTime":"2025-11-28T13:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.510125 4857 generic.go:334] "Generic (PLEG): container finished" podID="c9dfc021-dc50-485f-a833-e048ab7a390c" containerID="d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527" exitCode=0 Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.510246 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" event={"ID":"c9dfc021-dc50-485f-a833-e048ab7a390c","Type":"ContainerDied","Data":"d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527"} Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.531132 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerStarted","Data":"29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1"} Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.535486 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:54Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.554951 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.555009 4857 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.555032 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.555064 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.555087 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:54Z","lastTransitionTime":"2025-11-28T13:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.559417 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:54Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.574973 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:54Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.585305 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:54Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.604441 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b839750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:54Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.623216 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:54Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.638290 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:54Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.652092 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:54Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.662301 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.662339 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.662350 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.662370 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.662383 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:54Z","lastTransitionTime":"2025-11-28T13:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.673769 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30
f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:54Z is after 2025-08-24T17:21:41Z"
Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.687373 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:54Z is after 2025-08-24T17:21:41Z"
Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.700396 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:54Z is after 2025-08-24T17:21:41Z"
Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.715270 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:54Z is after 2025-08-24T17:21:41Z"
Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.726427 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:54Z is after 2025-08-24T17:21:41Z"
Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.738612 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:54Z is after 2025-08-24T17:21:41Z"
Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.752715 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:54Z is after 2025-08-24T17:21:41Z"
Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.765377 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.765420 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.765428 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.765447 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.765458 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:54Z","lastTransitionTime":"2025-11-28T13:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.868569 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.868632 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.868655 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.868687 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.868709 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:54Z","lastTransitionTime":"2025-11-28T13:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.908793 4857 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.972376 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.972436 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.972453 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.972478 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:18:54 crc kubenswrapper[4857]: I1128 13:18:54.972500 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:54Z","lastTransitionTime":"2025-11-28T13:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.075191 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.075251 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.075272 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.075297 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.075318 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:55Z","lastTransitionTime":"2025-11-28T13:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.178172 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.178207 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.178218 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.178237 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.178249 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:55Z","lastTransitionTime":"2025-11-28T13:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.281019 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.281081 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.281101 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.281126 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.281143 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:55Z","lastTransitionTime":"2025-11-28T13:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.308885 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.308991 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:18:55 crc kubenswrapper[4857]: E1128 13:18:55.309082 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 13:18:55 crc kubenswrapper[4857]: E1128 13:18:55.309175 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.309296 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 13:18:55 crc kubenswrapper[4857]: E1128 13:18:55.309501 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.342058 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 13:18:55 crc kubenswrapper[4857]: E1128 13:18:55.342325 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:19:03.342282647 +0000 UTC m=+35.369657964 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.342580 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.342774 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:18:55 crc kubenswrapper[4857]: E1128 13:18:55.342836 4857 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 28 13:18:55 crc kubenswrapper[4857]: E1128 13:18:55.343000 4857 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 28 13:18:55 crc kubenswrapper[4857]: E1128 13:18:55.343018 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:19:03.342999298 +0000 UTC m=+35.370374465 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 28 13:18:55 crc kubenswrapper[4857]: E1128 13:18:55.343150 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:19:03.343130102 +0000 UTC m=+35.370505269 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.384595 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.384652 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.384669 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.384692 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.384708 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:55Z","lastTransitionTime":"2025-11-28T13:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.443732 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.444908 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:18:55 crc kubenswrapper[4857]: E1128 13:18:55.444379 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 28 13:18:55 crc kubenswrapper[4857]: E1128 13:18:55.445152 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 28 13:18:55 crc kubenswrapper[4857]: E1128 13:18:55.445326 4857 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 13:18:55 crc kubenswrapper[4857]: E1128 13:18:55.445481 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 13:19:03.445456406 +0000 UTC m=+35.472831573 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 13:18:55 crc kubenswrapper[4857]: E1128 13:18:55.445239 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 28 13:18:55 crc kubenswrapper[4857]: E1128 13:18:55.445673 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 28 13:18:55 crc kubenswrapper[4857]: E1128 13:18:55.445744 4857 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 13:18:55 crc kubenswrapper[4857]: E1128 13:18:55.445887 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 13:19:03.445874978 +0000 UTC m=+35.473250145 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.489084 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.489140 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.489159 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.489183 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.489199 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:55Z","lastTransitionTime":"2025-11-28T13:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.543317 4857 generic.go:334] "Generic (PLEG): container finished" podID="c9dfc021-dc50-485f-a833-e048ab7a390c" containerID="5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a" exitCode=0
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.543380 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" event={"ID":"c9dfc021-dc50-485f-a833-e048ab7a390c","Type":"ContainerDied","Data":"5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a"}
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.567794 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:55Z is after 2025-08-24T17:21:41Z"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.584699 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:55Z is after 2025-08-24T17:21:41Z"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.593453 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.593518 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.593532 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.593554 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.593568 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:55Z","lastTransitionTime":"2025-11-28T13:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.602502 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:55Z is after 2025-08-24T17:21:41Z"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.617771 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:55Z is after 2025-08-24T17:21:41Z"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.631274 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:55Z is after 2025-08-24T17:21:41Z"
Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.656353 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b83
9750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:55Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.672174 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:55Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.685407 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:55Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.696608 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.696647 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.696659 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.696680 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.696693 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:55Z","lastTransitionTime":"2025-11-28T13:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.705963 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30
f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:55Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.720227 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:55Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.732890 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:55Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.745952 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:55Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.763392 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:55Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.778243 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:55Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.792670 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:55Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.800257 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.800318 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.800339 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.800366 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.800386 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:55Z","lastTransitionTime":"2025-11-28T13:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.903195 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.903246 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.903264 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.903286 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:55 crc kubenswrapper[4857]: I1128 13:18:55.903305 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:55Z","lastTransitionTime":"2025-11-28T13:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.006522 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.006573 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.006586 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.006606 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.006620 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:56Z","lastTransitionTime":"2025-11-28T13:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.110096 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.110180 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.110200 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.110227 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.110247 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:56Z","lastTransitionTime":"2025-11-28T13:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.214585 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.214658 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.214682 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.214711 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.214728 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:56Z","lastTransitionTime":"2025-11-28T13:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.318276 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.318355 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.318379 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.318410 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.318432 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:56Z","lastTransitionTime":"2025-11-28T13:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.422107 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.422144 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.422156 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.422171 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.422182 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:56Z","lastTransitionTime":"2025-11-28T13:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.525734 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.525826 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.525844 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.525870 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.525888 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:56Z","lastTransitionTime":"2025-11-28T13:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.627719 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.627803 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.627815 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.627838 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.627852 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:56Z","lastTransitionTime":"2025-11-28T13:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.694596 4857 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.730407 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.730452 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.730461 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.730478 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.730491 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:56Z","lastTransitionTime":"2025-11-28T13:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.838465 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.838556 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.838583 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.838613 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.838647 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:56Z","lastTransitionTime":"2025-11-28T13:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.942415 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.942474 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.942492 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.942517 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:56 crc kubenswrapper[4857]: I1128 13:18:56.942607 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:56Z","lastTransitionTime":"2025-11-28T13:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.045177 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.045247 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.045265 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.045673 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.045724 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:57Z","lastTransitionTime":"2025-11-28T13:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.148379 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.148411 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.148422 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.148439 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.148450 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:57Z","lastTransitionTime":"2025-11-28T13:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.252261 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.252327 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.252342 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.252361 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.252377 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:57Z","lastTransitionTime":"2025-11-28T13:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.309150 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.309294 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:18:57 crc kubenswrapper[4857]: E1128 13:18:57.309328 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.309411 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:18:57 crc kubenswrapper[4857]: E1128 13:18:57.309420 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:18:57 crc kubenswrapper[4857]: E1128 13:18:57.309608 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.355952 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.356312 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.356327 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.356348 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.356362 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:57Z","lastTransitionTime":"2025-11-28T13:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.458501 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.458547 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.458561 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.458577 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.458588 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:57Z","lastTransitionTime":"2025-11-28T13:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.560838 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.560911 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.560934 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.560963 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.560985 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:57Z","lastTransitionTime":"2025-11-28T13:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.561427 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerStarted","Data":"587eabbf22cb05ef5448589737f83397316b62a2513f9274eec3a219ca48183d"} Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.561894 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.567550 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" event={"ID":"c9dfc021-dc50-485f-a833-e048ab7a390c","Type":"ContainerStarted","Data":"89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440"} Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.581816 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.590558 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.596184 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.607394 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.619470 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.631855 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.642122 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.658680 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.663263 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.663318 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.663332 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.663353 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.663366 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:57Z","lastTransitionTime":"2025-11-28T13:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.676741 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc830
1ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\
\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.688660 4857 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.708918 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b83
9750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.724106 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.752926 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.766717 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.766801 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.766818 4857 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.766835 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.766847 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:57Z","lastTransitionTime":"2025-11-28T13:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.776577 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.795196 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.822040 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\
\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\
"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://587eabbf22cb05ef5448589737f83397316b62a2513f9274eec3a219ca48183d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.836088 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.846821 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.856591 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.869581 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.869632 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.869644 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.869663 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.869676 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:57Z","lastTransitionTime":"2025-11-28T13:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.876655 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":
0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b839750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termi
nated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.894398 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.908300 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.929893 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://587eabbf22cb05ef5448589737f83397316b62a2
513f9274eec3a219ca48183d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.946971 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.959848 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.972835 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.972908 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.972934 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.972965 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.972989 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:57Z","lastTransitionTime":"2025-11-28T13:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.973000 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.985366 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly
\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:57 crc kubenswrapper[4857]: I1128 13:18:57.998927 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:57Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.012136 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.025082 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.038070 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.075623 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.075661 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.075669 4857 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.075684 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.075693 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:58Z","lastTransitionTime":"2025-11-28T13:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.178689 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.178730 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.178743 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.178771 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.178782 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:58Z","lastTransitionTime":"2025-11-28T13:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.281477 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.281551 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.281569 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.281597 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.281613 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:58Z","lastTransitionTime":"2025-11-28T13:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.324748 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.341597 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.360342 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.381679 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.384142 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.384213 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.384233 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.384259 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.384278 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:58Z","lastTransitionTime":"2025-11-28T13:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.407287 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.430675 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.457213 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.470129 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.486978 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.487020 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.487033 4857 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.487050 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.487062 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:58Z","lastTransitionTime":"2025-11-28T13:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.490185 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\
\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.501368 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.511557 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.533999 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b839750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.555626 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.571038 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.571068 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.575736 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.590189 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.590243 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.590258 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.590280 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.590295 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:58Z","lastTransitionTime":"2025-11-28T13:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.600355 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.602294 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"m
ountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b
884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://587eabbf22cb05ef5448589737f83397316b62a2513f9274eec3a219ca48183d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\"
:\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.624415 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.641858 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.655384 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.669262 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.681634 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.693162 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.693202 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.693214 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.693231 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.693240 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:58Z","lastTransitionTime":"2025-11-28T13:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.694243 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.711388 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.728960 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.748117 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.769300 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.783334 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.796298 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.796348 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.796358 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.796378 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.796389 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:58Z","lastTransitionTime":"2025-11-28T13:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.808131 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":
0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b839750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termi
nated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.826782 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.846261 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.867077 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://587eabbf22cb05ef5448589737f83397316b62a2
513f9274eec3a219ca48183d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:18:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.899435 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.899489 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.899498 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.899516 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:58 crc kubenswrapper[4857]: I1128 13:18:58.899530 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:58Z","lastTransitionTime":"2025-11-28T13:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.001568 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.001622 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.001635 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.001654 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.001666 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:59Z","lastTransitionTime":"2025-11-28T13:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.104204 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.104257 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.104275 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.104302 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.104321 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:59Z","lastTransitionTime":"2025-11-28T13:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.211982 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.212045 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.212076 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.212104 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.212123 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:59Z","lastTransitionTime":"2025-11-28T13:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.308812 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.308967 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.308845 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:18:59 crc kubenswrapper[4857]: E1128 13:18:59.309034 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:18:59 crc kubenswrapper[4857]: E1128 13:18:59.309192 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:18:59 crc kubenswrapper[4857]: E1128 13:18:59.309296 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.315810 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.315837 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.315849 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.315864 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.315875 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:59Z","lastTransitionTime":"2025-11-28T13:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.419335 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.419408 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.419425 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.419452 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.419472 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:59Z","lastTransitionTime":"2025-11-28T13:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.522379 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.522446 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.522470 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.522500 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.522524 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:59Z","lastTransitionTime":"2025-11-28T13:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.625708 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.625794 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.625821 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.625864 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.625883 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:59Z","lastTransitionTime":"2025-11-28T13:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.728987 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.729052 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.729071 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.729099 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.729119 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:59Z","lastTransitionTime":"2025-11-28T13:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.832532 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.832595 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.832613 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.832637 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.832656 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:59Z","lastTransitionTime":"2025-11-28T13:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.935073 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.935132 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.935149 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.935173 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:18:59 crc kubenswrapper[4857]: I1128 13:18:59.935189 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:18:59Z","lastTransitionTime":"2025-11-28T13:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.038433 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.038507 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.038525 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.038547 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.038563 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:00Z","lastTransitionTime":"2025-11-28T13:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.142404 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.142486 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.142511 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.142543 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.142571 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:00Z","lastTransitionTime":"2025-11-28T13:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.245700 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.245812 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.245839 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.245869 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.245894 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:00Z","lastTransitionTime":"2025-11-28T13:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.348502 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.348555 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.348575 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.348603 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.348625 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:00Z","lastTransitionTime":"2025-11-28T13:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.452290 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.452350 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.452368 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.452396 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.452414 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:00Z","lastTransitionTime":"2025-11-28T13:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.555439 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.555513 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.555536 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.555565 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.555588 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:00Z","lastTransitionTime":"2025-11-28T13:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.582525 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w25ss_bf74e995-2208-43c6-b89d-10318f55cda8/ovnkube-controller/0.log" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.586782 4857 generic.go:334] "Generic (PLEG): container finished" podID="bf74e995-2208-43c6-b89d-10318f55cda8" containerID="587eabbf22cb05ef5448589737f83397316b62a2513f9274eec3a219ca48183d" exitCode=1 Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.586852 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerDied","Data":"587eabbf22cb05ef5448589737f83397316b62a2513f9274eec3a219ca48183d"} Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.588247 4857 scope.go:117] "RemoveContainer" containerID="587eabbf22cb05ef5448589737f83397316b62a2513f9274eec3a219ca48183d" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.618248 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"s
tate\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b839750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12
ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.646448 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.658000 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.658039 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.658050 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.658068 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.658079 4857 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:00Z","lastTransitionTime":"2025-11-28T13:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.667607 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.687677 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.704855 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.721285 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.743403 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://587eabbf22cb05ef5448589737f83397316b62a2
513f9274eec3a219ca48183d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://587eabbf22cb05ef5448589737f83397316b62a2513f9274eec3a219ca48183d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:00Z\\\",\\\"message\\\":\\\"xternalversions/factory.go:140\\\\nI1128 13:18:59.192030 6168 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 13:18:59.192068 6168 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 13:18:59.192223 6168 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 13:18:59.192523 6168 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 13:18:59.192615 6168 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1128 13:18:59.192708 6168 reflector.go:311] Stopping reflector *v1.ClusterUserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 13:18:59.193171 6168 factory.go:656] Stopping 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099
482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.761954 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.762013 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.762028 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.762051 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.762068 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:00Z","lastTransitionTime":"2025-11-28T13:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.764918 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.782942 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.801870 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.815718 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.833284 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.855675 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.865006 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.865057 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.865070 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.865088 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.865101 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:00Z","lastTransitionTime":"2025-11-28T13:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.869429 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.884527 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\
\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c
85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.967602 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.967646 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.967659 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.967677 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:00 crc kubenswrapper[4857]: I1128 13:19:00.967691 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:00Z","lastTransitionTime":"2025-11-28T13:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.070315 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.070396 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.070417 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.070447 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.070467 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:01Z","lastTransitionTime":"2025-11-28T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.173170 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.173212 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.173224 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.173240 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.173252 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:01Z","lastTransitionTime":"2025-11-28T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.275731 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.275818 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.275837 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.275860 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.275877 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:01Z","lastTransitionTime":"2025-11-28T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.308423 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.308558 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:01 crc kubenswrapper[4857]: E1128 13:19:01.308605 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.308685 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:01 crc kubenswrapper[4857]: E1128 13:19:01.308780 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:19:01 crc kubenswrapper[4857]: E1128 13:19:01.308874 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.345701 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.345735 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.345765 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.345788 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.345799 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:01Z","lastTransitionTime":"2025-11-28T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:01 crc kubenswrapper[4857]: E1128 13:19:01.358964 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.363306 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.363371 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.363384 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.363405 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.363417 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:01Z","lastTransitionTime":"2025-11-28T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:01 crc kubenswrapper[4857]: E1128 13:19:01.376638 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.380663 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.380702 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.380713 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.380730 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.380741 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:01Z","lastTransitionTime":"2025-11-28T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:01 crc kubenswrapper[4857]: E1128 13:19:01.393612 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.396884 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.396916 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.396927 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.396943 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.396953 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:01Z","lastTransitionTime":"2025-11-28T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:01 crc kubenswrapper[4857]: E1128 13:19:01.409344 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.413957 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.414010 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.414025 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.414043 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.414054 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:01Z","lastTransitionTime":"2025-11-28T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:01 crc kubenswrapper[4857]: E1128 13:19:01.427176 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:01 crc kubenswrapper[4857]: E1128 13:19:01.427351 4857 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.429507 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.429549 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.429561 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.429582 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.429598 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:01Z","lastTransitionTime":"2025-11-28T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.532522 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.532595 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.532616 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.532646 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.532669 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:01Z","lastTransitionTime":"2025-11-28T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.593244 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w25ss_bf74e995-2208-43c6-b89d-10318f55cda8/ovnkube-controller/0.log" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.599192 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerStarted","Data":"6b9dff614e01fbce60f50ab11f5c6163da77cf630ef0fbaee81db45fa15b1be0"} Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.600385 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.620819 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a891
2cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.635713 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.636285 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.636317 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.636341 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.636358 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:01Z","lastTransitionTime":"2025-11-28T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.653638 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b9dff614e01fbce60f50ab11f5c6163da77cf630ef0fbaee81db45fa15b1be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://587eabbf22cb05ef5448589737f83397316b62a2513f9274eec3a219ca48183d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:00Z\\\",\\\"message\\\":\\\"xternalversions/factory.go:140\\\\nI1128 13:18:59.192030 6168 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 13:18:59.192068 6168 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 13:18:59.192223 6168 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 13:18:59.192523 6168 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 13:18:59.192615 6168 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1128 13:18:59.192708 6168 reflector.go:311] Stopping reflector *v1.ClusterUserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 13:18:59.193171 6168 factory.go:656] Stopping 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.666407 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.682476 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.698000 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.713162 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.725586 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.736881 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.738803 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.738830 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.738840 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.738854 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.738863 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:01Z","lastTransitionTime":"2025-11-28T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.750237 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.763851 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.777179 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.789888 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.806440 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.826998 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b83
9750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.841883 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.841928 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.841940 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.841957 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.841968 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:01Z","lastTransitionTime":"2025-11-28T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.842332 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:01Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.944888 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.944936 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.944948 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.944963 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:01 crc kubenswrapper[4857]: I1128 13:19:01.944973 4857 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:01Z","lastTransitionTime":"2025-11-28T13:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.047113 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.047195 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.047206 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.047222 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.047231 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:02Z","lastTransitionTime":"2025-11-28T13:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.149990 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.150028 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.150037 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.150052 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.150064 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:02Z","lastTransitionTime":"2025-11-28T13:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.255175 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.255228 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.255248 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.255271 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.255288 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:02Z","lastTransitionTime":"2025-11-28T13:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.257579 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc"] Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.258345 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.260871 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.261145 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.277200 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.296541 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.309531 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.323835 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.331210 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1de596ae-343e-4839-b049-61fb6b8fe7c8-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7mglc\" (UID: \"1de596ae-343e-4839-b049-61fb6b8fe7c8\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.331294 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1de596ae-343e-4839-b049-61fb6b8fe7c8-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7mglc\" (UID: \"1de596ae-343e-4839-b049-61fb6b8fe7c8\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.331330 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfx7q\" (UniqueName: \"kubernetes.io/projected/1de596ae-343e-4839-b049-61fb6b8fe7c8-kube-api-access-tfx7q\") pod 
\"ovnkube-control-plane-749d76644c-7mglc\" (UID: \"1de596ae-343e-4839-b049-61fb6b8fe7c8\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.331377 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1de596ae-343e-4839-b049-61fb6b8fe7c8-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7mglc\" (UID: \"1de596ae-343e-4839-b049-61fb6b8fe7c8\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.337860 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.358730 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:02 crc kubenswrapper[4857]: 
I1128 13:19:02.358804 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.358831 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.358852 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.358867 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:02Z","lastTransitionTime":"2025-11-28T13:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.363514 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-
11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b839750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426
e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.380498 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.397697 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.417172 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b9dff614e01fbce60f50ab11f5c6163da77cf63
0ef0fbaee81db45fa15b1be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://587eabbf22cb05ef5448589737f83397316b62a2513f9274eec3a219ca48183d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:00Z\\\",\\\"message\\\":\\\"xternalversions/factory.go:140\\\\nI1128 13:18:59.192030 6168 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 13:18:59.192068 6168 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 13:18:59.192223 6168 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 13:18:59.192523 6168 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 13:18:59.192615 6168 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1128 13:18:59.192708 6168 reflector.go:311] Stopping reflector *v1.ClusterUserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 13:18:59.193171 6168 factory.go:656] Stopping 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.431586 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1de596ae-343e-4839-b049-61fb6b8fe7c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7mglc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.432320 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfx7q\" (UniqueName: \"kubernetes.io/projected/1de596ae-343e-4839-b049-61fb6b8fe7c8-kube-api-access-tfx7q\") pod \"ovnkube-control-plane-749d76644c-7mglc\" (UID: \"1de596ae-343e-4839-b049-61fb6b8fe7c8\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.432393 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1de596ae-343e-4839-b049-61fb6b8fe7c8-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7mglc\" (UID: \"1de596ae-343e-4839-b049-61fb6b8fe7c8\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.432441 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1de596ae-343e-4839-b049-61fb6b8fe7c8-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7mglc\" (UID: \"1de596ae-343e-4839-b049-61fb6b8fe7c8\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.432488 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1de596ae-343e-4839-b049-61fb6b8fe7c8-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7mglc\" (UID: \"1de596ae-343e-4839-b049-61fb6b8fe7c8\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.433216 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1de596ae-343e-4839-b049-61fb6b8fe7c8-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7mglc\" (UID: \"1de596ae-343e-4839-b049-61fb6b8fe7c8\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.433319 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1de596ae-343e-4839-b049-61fb6b8fe7c8-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7mglc\" (UID: \"1de596ae-343e-4839-b049-61fb6b8fe7c8\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.440841 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1de596ae-343e-4839-b049-61fb6b8fe7c8-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7mglc\" (UID: \"1de596ae-343e-4839-b049-61fb6b8fe7c8\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.445908 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.454247 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfx7q\" (UniqueName: \"kubernetes.io/projected/1de596ae-343e-4839-b049-61fb6b8fe7c8-kube-api-access-tfx7q\") pod \"ovnkube-control-plane-749d76644c-7mglc\" (UID: \"1de596ae-343e-4839-b049-61fb6b8fe7c8\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.461652 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.462107 4857 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.462139 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.462151 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.462172 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.462187 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:02Z","lastTransitionTime":"2025-11-28T13:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.478931 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin
\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.493991 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.505932 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.521874 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:02Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.565451 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.565509 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.565521 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.565540 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.565553 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:02Z","lastTransitionTime":"2025-11-28T13:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.579953 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" Nov 28 13:19:02 crc kubenswrapper[4857]: W1128 13:19:02.596829 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1de596ae_343e_4839_b049_61fb6b8fe7c8.slice/crio-ddb64992f6316a31ac79105a311ef376c72d32ea87f5922a488f71ac5405ff06 WatchSource:0}: Error finding container ddb64992f6316a31ac79105a311ef376c72d32ea87f5922a488f71ac5405ff06: Status 404 returned error can't find the container with id ddb64992f6316a31ac79105a311ef376c72d32ea87f5922a488f71ac5405ff06 Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.603414 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" event={"ID":"1de596ae-343e-4839-b049-61fb6b8fe7c8","Type":"ContainerStarted","Data":"ddb64992f6316a31ac79105a311ef376c72d32ea87f5922a488f71ac5405ff06"} Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.668522 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.668565 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.668578 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.668593 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.668604 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:02Z","lastTransitionTime":"2025-11-28T13:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.772533 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.772578 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.772589 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.772607 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.772620 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:02Z","lastTransitionTime":"2025-11-28T13:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.875662 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.875721 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.875738 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.875797 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.875816 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:02Z","lastTransitionTime":"2025-11-28T13:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.979005 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.979427 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.979605 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.979740 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:02 crc kubenswrapper[4857]: I1128 13:19:02.979900 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:02Z","lastTransitionTime":"2025-11-28T13:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.084718 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.084781 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.084797 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.084817 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.084831 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:03Z","lastTransitionTime":"2025-11-28T13:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.188167 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.188200 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.188212 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.188228 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.188240 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:03Z","lastTransitionTime":"2025-11-28T13:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.290676 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.291082 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.291095 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.291119 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.291132 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:03Z","lastTransitionTime":"2025-11-28T13:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.308558 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.308558 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:03 crc kubenswrapper[4857]: E1128 13:19:03.308678 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:19:03 crc kubenswrapper[4857]: E1128 13:19:03.308782 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.308565 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:03 crc kubenswrapper[4857]: E1128 13:19:03.308865 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.344447 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.344629 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:03 crc kubenswrapper[4857]: E1128 13:19:03.344649 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:19:19.344620857 +0000 UTC m=+51.371996024 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.344694 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:03 crc kubenswrapper[4857]: E1128 13:19:03.344727 4857 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:19:03 crc kubenswrapper[4857]: E1128 13:19:03.344820 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:19:19.344799172 +0000 UTC m=+51.372174389 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:19:03 crc kubenswrapper[4857]: E1128 13:19:03.344924 4857 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:19:03 crc kubenswrapper[4857]: E1128 13:19:03.345007 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:19:19.344994048 +0000 UTC m=+51.372369225 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.393433 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.393473 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.393485 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.393502 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.393511 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:03Z","lastTransitionTime":"2025-11-28T13:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.446352 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.446438 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:03 crc kubenswrapper[4857]: E1128 13:19:03.446634 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:19:03 crc kubenswrapper[4857]: E1128 13:19:03.446654 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:19:03 crc kubenswrapper[4857]: E1128 13:19:03.446668 4857 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:19:03 crc kubenswrapper[4857]: E1128 13:19:03.446687 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:19:03 crc kubenswrapper[4857]: E1128 13:19:03.446737 4857 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 13:19:19.446718276 +0000 UTC m=+51.474093443 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:19:03 crc kubenswrapper[4857]: E1128 13:19:03.446740 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:19:03 crc kubenswrapper[4857]: E1128 13:19:03.446788 4857 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:19:03 crc kubenswrapper[4857]: E1128 13:19:03.446871 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 13:19:19.446837129 +0000 UTC m=+51.474212306 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.496614 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.496649 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.496658 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.496674 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.496685 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:03Z","lastTransitionTime":"2025-11-28T13:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.599178 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.599232 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.599242 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.599264 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.599277 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:03Z","lastTransitionTime":"2025-11-28T13:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.608120 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" event={"ID":"1de596ae-343e-4839-b049-61fb6b8fe7c8","Type":"ContainerStarted","Data":"40364e12e5cb85f33d248ca158dabde6c73b2f1b58e4d3c95f454f0bce166c89"} Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.608177 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" event={"ID":"1de596ae-343e-4839-b049-61fb6b8fe7c8","Type":"ContainerStarted","Data":"8d95a5273a98c54e77a7eacddf2dd80d3b844b0e91f4acea5b62956781ed7a45"} Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.610436 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w25ss_bf74e995-2208-43c6-b89d-10318f55cda8/ovnkube-controller/1.log" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.611372 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w25ss_bf74e995-2208-43c6-b89d-10318f55cda8/ovnkube-controller/0.log" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.614852 4857 generic.go:334] "Generic (PLEG): container finished" podID="bf74e995-2208-43c6-b89d-10318f55cda8" containerID="6b9dff614e01fbce60f50ab11f5c6163da77cf630ef0fbaee81db45fa15b1be0" exitCode=1 Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.614900 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerDied","Data":"6b9dff614e01fbce60f50ab11f5c6163da77cf630ef0fbaee81db45fa15b1be0"} Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.614968 4857 scope.go:117] "RemoveContainer" containerID="587eabbf22cb05ef5448589737f83397316b62a2513f9274eec3a219ca48183d" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.616249 4857 scope.go:117] "RemoveContainer" containerID="6b9dff614e01fbce60f50ab11f5c6163da77cf630ef0fbaee81db45fa15b1be0" Nov 28 13:19:03 crc kubenswrapper[4857]: E1128 13:19:03.616550 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed 
container=ovnkube-controller pod=ovnkube-node-w25ss_openshift-ovn-kubernetes(bf74e995-2208-43c6-b89d-10318f55cda8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" podUID="bf74e995-2208-43c6-b89d-10318f55cda8"
Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.631869 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z"
Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.649907 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z"
Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.662519 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z"
Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.675956 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z"
Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.686658 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z"
Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.702578 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.702614 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.702626 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.702645 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.702669 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:03Z","lastTransitionTime":"2025-11-28T13:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.708536 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b839750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z"
Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.722356 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-jspn8"]
Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.722989 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8"
Nov 28 13:19:03 crc kubenswrapper[4857]: E1128 13:19:03.723078 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d"
Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.723155 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z"
Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.737516 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z"
Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.754599 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b9dff614e01fbce60f50ab11f5c6163da77cf63
0ef0fbaee81db45fa15b1be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://587eabbf22cb05ef5448589737f83397316b62a2513f9274eec3a219ca48183d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:00Z\\\",\\\"message\\\":\\\"xternalversions/factory.go:140\\\\nI1128 13:18:59.192030 6168 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 13:18:59.192068 6168 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 13:18:59.192223 6168 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 13:18:59.192523 6168 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 13:18:59.192615 6168 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1128 13:18:59.192708 6168 reflector.go:311] Stopping reflector *v1.ClusterUserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 13:18:59.193171 6168 factory.go:656] Stopping 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.765379 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1de596ae-343e-4839-b049-61fb6b8fe7c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d95a5273a98c54e77a7eacddf2dd80d3b844b0e91f4acea5b62956781ed7a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40364e12e5cb85f33d248ca158dabde6c73b2f1b58e4d3c95f454f0bce166c89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7mglc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 28 
13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.775180 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.786171 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.797891 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.805142 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.805170 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.805178 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.805193 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.805204 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:03Z","lastTransitionTime":"2025-11-28T13:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.813594 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.829709 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.843799 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.850617 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmzxm\" (UniqueName: \"kubernetes.io/projected/9ab9b94a-66a7-4d68-8046-d6d97595330d-kube-api-access-qmzxm\") pod 
\"network-metrics-daemon-jspn8\" (UID: \"9ab9b94a-66a7-4d68-8046-d6d97595330d\") " pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.850664 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs\") pod \"network-metrics-daemon-jspn8\" (UID: \"9ab9b94a-66a7-4d68-8046-d6d97595330d\") " pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.858046 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.871076 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.884417 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.898344 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.908784 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.908816 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.908828 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.908846 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.908857 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:03Z","lastTransitionTime":"2025-11-28T13:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.916813 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.932717 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.950868 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.951954 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmzxm\" (UniqueName: \"kubernetes.io/projected/9ab9b94a-66a7-4d68-8046-d6d97595330d-kube-api-access-qmzxm\") pod \"network-metrics-daemon-jspn8\" (UID: \"9ab9b94a-66a7-4d68-8046-d6d97595330d\") " pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.952011 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs\") pod \"network-metrics-daemon-jspn8\" (UID: \"9ab9b94a-66a7-4d68-8046-d6d97595330d\") " pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:03 crc kubenswrapper[4857]: E1128 13:19:03.952163 4857 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:19:03 crc kubenswrapper[4857]: E1128 13:19:03.952223 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs podName:9ab9b94a-66a7-4d68-8046-d6d97595330d nodeName:}" failed. No retries permitted until 2025-11-28 13:19:04.452205575 +0000 UTC m=+36.479580762 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs") pod "network-metrics-daemon-jspn8" (UID: "9ab9b94a-66a7-4d68-8046-d6d97595330d") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.970403 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jspn8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ab9b94a-66a7-4d68-8046-d6d97595330d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jspn8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 
2025-08-24T17:21:41Z" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.973436 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmzxm\" (UniqueName: \"kubernetes.io/projected/9ab9b94a-66a7-4d68-8046-d6d97595330d-kube-api-access-qmzxm\") pod \"network-metrics-daemon-jspn8\" (UID: \"9ab9b94a-66a7-4d68-8046-d6d97595330d\") " pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:03 crc kubenswrapper[4857]: I1128 13:19:03.989010 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.012599 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.012689 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.012714 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.012745 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.012804 4857 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:04Z","lastTransitionTime":"2025-11-28T13:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.013918 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:04Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.032422 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:04Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.049136 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:04Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.082675 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b83
9750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:04Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.100806 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:04Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.114690 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1de596ae-343e-4839-b049-61fb6b8fe7c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d95a5273a98c54e77a7eacddf2dd80d3b844b0e91f4acea5b62956781ed7a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40364e12e5cb85f33d248ca158dabde6c73b2f1b58e4d3c95f454f0bce166c89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7mglc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:04Z is after 2025-08-24T17:21:41Z" Nov 28 
13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.116461 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.116504 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.116519 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.116535 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.116548 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:04Z","lastTransitionTime":"2025-11-28T13:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.131070 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"rea
dy\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:04Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.162423 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b9dff614e01fbce60f50ab11f5c6163da77cf630ef0fbaee81db45fa15b1be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://587eabbf22cb05ef5448589737f83397316b62a2513f9274eec3a219ca48183d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:00Z\\\",\\\"message\\\":\\\"xternalversions/factory.go:140\\\\nI1128 13:18:59.192030 6168 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 13:18:59.192068 6168 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 13:18:59.192223 6168 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 13:18:59.192523 6168 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 13:18:59.192615 6168 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1128 13:18:59.192708 6168 reflector.go:311] Stopping reflector *v1.ClusterUserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 13:18:59.193171 6168 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b9dff614e01fbce60f50ab11f5c6163da77cf630ef0fbaee81db45fa15b1be0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"message\\\":\\\"3:19:02.335402 6308 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 13:19:02.335415 6308 handler.go:190] Sending *v1.Namespace event handler 5 for 
removal\\\\nI1128 13:19:02.335420 6308 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 13:19:02.335432 6308 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 13:19:02.335440 6308 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 13:19:02.335473 6308 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 13:19:02.335477 6308 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 13:19:02.335496 6308 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 13:19:02.335502 6308 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 13:19:02.335524 6308 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 13:19:02.335533 6308 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 13:19:02.335524 6308 factory.go:656] Stopping watch factory\\\\nI1128 13:19:02.335545 6308 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 13:19:02.335557 6308 ovnkube.go:599] Stopped ovnkube\\\\nI1128 13:19:02.335525 6308 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 13:19:02.335595 6308 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 13\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:04Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.219646 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.219712 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.219730 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.219794 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.219827 4857 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:04Z","lastTransitionTime":"2025-11-28T13:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.322476 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.322542 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.322554 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.322568 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.322579 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:04Z","lastTransitionTime":"2025-11-28T13:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.425105 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.425153 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.425164 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.425181 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.425192 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:04Z","lastTransitionTime":"2025-11-28T13:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.456923 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs\") pod \"network-metrics-daemon-jspn8\" (UID: \"9ab9b94a-66a7-4d68-8046-d6d97595330d\") " pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:04 crc kubenswrapper[4857]: E1128 13:19:04.457120 4857 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:19:04 crc kubenswrapper[4857]: E1128 13:19:04.457214 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs podName:9ab9b94a-66a7-4d68-8046-d6d97595330d nodeName:}" failed. No retries permitted until 2025-11-28 13:19:05.457192069 +0000 UTC m=+37.484567246 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs") pod "network-metrics-daemon-jspn8" (UID: "9ab9b94a-66a7-4d68-8046-d6d97595330d") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.528397 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.528445 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.528463 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.528485 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.528503 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:04Z","lastTransitionTime":"2025-11-28T13:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.620834 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w25ss_bf74e995-2208-43c6-b89d-10318f55cda8/ovnkube-controller/1.log" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.630067 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.630107 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.630122 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.630140 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.630156 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:04Z","lastTransitionTime":"2025-11-28T13:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.732832 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.732883 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.732899 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.732920 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.732934 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:04Z","lastTransitionTime":"2025-11-28T13:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.836147 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.836217 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.836240 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.836268 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.836292 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:04Z","lastTransitionTime":"2025-11-28T13:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.938648 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.939069 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.939254 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.939393 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:04 crc kubenswrapper[4857]: I1128 13:19:04.939518 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:04Z","lastTransitionTime":"2025-11-28T13:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.043026 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.043101 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.043117 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.043136 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.043149 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:05Z","lastTransitionTime":"2025-11-28T13:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.146678 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.146747 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.146815 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.146846 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.146871 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:05Z","lastTransitionTime":"2025-11-28T13:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.249510 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.249546 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.249556 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.249573 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.249586 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:05Z","lastTransitionTime":"2025-11-28T13:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.309018 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.309027 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:05 crc kubenswrapper[4857]: E1128 13:19:05.309808 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.309091 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:05 crc kubenswrapper[4857]: E1128 13:19:05.309953 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:19:05 crc kubenswrapper[4857]: E1128 13:19:05.309515 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.309039 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:05 crc kubenswrapper[4857]: E1128 13:19:05.310059 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.357888 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.358307 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.358514 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.358702 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.358942 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:05Z","lastTransitionTime":"2025-11-28T13:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.430902 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.449061 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:05Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.462491 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.462546 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.462623 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.462650 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.462670 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:05Z","lastTransitionTime":"2025-11-28T13:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.468284 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs\") pod \"network-metrics-daemon-jspn8\" (UID: \"9ab9b94a-66a7-4d68-8046-d6d97595330d\") " pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:05 crc kubenswrapper[4857]: E1128 13:19:05.468471 4857 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:19:05 crc kubenswrapper[4857]: E1128 13:19:05.468596 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs podName:9ab9b94a-66a7-4d68-8046-d6d97595330d nodeName:}" failed. No retries permitted until 2025-11-28 13:19:07.468562569 +0000 UTC m=+39.495937796 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs") pod "network-metrics-daemon-jspn8" (UID: "9ab9b94a-66a7-4d68-8046-d6d97595330d") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.468888 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"n
ame\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:05Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.490678 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:05Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.510403 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:05Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.525017 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:05Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.540269 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:05Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.554692 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:05Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.564854 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.564890 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.564900 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.564916 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.564924 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:05Z","lastTransitionTime":"2025-11-28T13:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.573382 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:05Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.584019 4857 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-jspn8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ab9b94a-66a7-4d68-8046-d6d97595330d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jspn8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:05Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.599576 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:05Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.616225 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:05Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.647531 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b83
9750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:05Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.663825 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707
d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of 
insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:05Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.667961 4857 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.668019 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.668038 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.668059 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.668076 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:05Z","lastTransitionTime":"2025-11-28T13:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.681431 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:05Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.697476 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},
{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:05Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.715530 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b9dff614e01fbce60f50ab11f5c6163da77cf630ef0fbaee81db45fa15b1be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://587eabbf22cb05ef5448589737f83397316b62a2513f9274eec3a219ca48183d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:00Z\\\",\\\"message\\\":\\\"xternalversions/factory.go:140\\\\nI1128 13:18:59.192030 6168 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 13:18:59.192068 6168 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 13:18:59.192223 6168 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 13:18:59.192523 6168 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 13:18:59.192615 6168 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1128 13:18:59.192708 6168 reflector.go:311] Stopping reflector *v1.ClusterUserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 13:18:59.193171 6168 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b9dff614e01fbce60f50ab11f5c6163da77cf630ef0fbaee81db45fa15b1be0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"message\\\":\\\"3:19:02.335402 6308 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 13:19:02.335415 6308 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 13:19:02.335420 6308 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 13:19:02.335432 6308 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 13:19:02.335440 6308 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 13:19:02.335473 6308 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 13:19:02.335477 6308 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 13:19:02.335496 6308 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 13:19:02.335502 6308 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 13:19:02.335524 6308 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 13:19:02.335533 6308 handler.go:208] Removed *v1.EgressFirewall 
event handler 9\\\\nI1128 13:19:02.335524 6308 factory.go:656] Stopping watch factory\\\\nI1128 13:19:02.335545 6308 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 13:19:02.335557 6308 ovnkube.go:599] Stopped ovnkube\\\\nI1128 13:19:02.335525 6308 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 13:19:02.335595 6308 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 13\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kub
e-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:05Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.728633 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1de596ae-343e-4839-b049-61fb6b8fe7c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d95a5273a98c54e77a7eacddf2dd80d3b844b0e91f4acea5b62956781ed7a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40364e12e5cb85f33d248ca158dabde6c73b2f1b58e4d3c95f454f0bce166c89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7mglc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:05Z is after 2025-08-24T17:21:41Z" Nov 28 
13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.771534 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.771572 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.771581 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.771598 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.771609 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:05Z","lastTransitionTime":"2025-11-28T13:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.874096 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.874797 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.874840 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.874877 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.874895 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:05Z","lastTransitionTime":"2025-11-28T13:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.980499 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.980559 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.980578 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.980603 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:05 crc kubenswrapper[4857]: I1128 13:19:05.980619 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:05Z","lastTransitionTime":"2025-11-28T13:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.083597 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.084009 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.084202 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.084453 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.084656 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:06Z","lastTransitionTime":"2025-11-28T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.187145 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.187526 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.187679 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.187849 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.188017 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:06Z","lastTransitionTime":"2025-11-28T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.291496 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.291597 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.291662 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.291691 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.291708 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:06Z","lastTransitionTime":"2025-11-28T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.394734 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.394818 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.394842 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.394872 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.394893 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:06Z","lastTransitionTime":"2025-11-28T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.498112 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.498408 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.498510 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.498592 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.498675 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:06Z","lastTransitionTime":"2025-11-28T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.602284 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.602345 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.602366 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.602394 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.602419 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:06Z","lastTransitionTime":"2025-11-28T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.705705 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.705799 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.705819 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.705842 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.705859 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:06Z","lastTransitionTime":"2025-11-28T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.809537 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.809590 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.809612 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.809638 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.809657 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:06Z","lastTransitionTime":"2025-11-28T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.911792 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.911845 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.911857 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.911872 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:06 crc kubenswrapper[4857]: I1128 13:19:06.911884 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:06Z","lastTransitionTime":"2025-11-28T13:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.013988 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.014058 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.014079 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.014107 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.014131 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:07Z","lastTransitionTime":"2025-11-28T13:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.117239 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.117301 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.117319 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.117342 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.117359 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:07Z","lastTransitionTime":"2025-11-28T13:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.220656 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.220694 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.220706 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.220723 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.220737 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:07Z","lastTransitionTime":"2025-11-28T13:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.308631 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.308786 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.308837 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8"
Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.308942 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:19:07 crc kubenswrapper[4857]: E1128 13:19:07.308938 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 13:19:07 crc kubenswrapper[4857]: E1128 13:19:07.309255 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 13:19:07 crc kubenswrapper[4857]: E1128 13:19:07.309339 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 13:19:07 crc kubenswrapper[4857]: E1128 13:19:07.309393 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d"
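The NodeNotReady / NetworkReady=false entries above all reduce to one probe: the container runtime reports the network plugin not ready as long as it finds no CNI network config under /etc/kubernetes/cni/net.d/, which on this node only appears once ovnkube-node recovers and writes one. A rough Go sketch of such a directory probe, assuming the path from the log message; the accepted file extensions mirror common libcni-style loaders and are illustrative, not the exact cri-o implementation:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cniConfigPresent reports whether any CNI network config exists in dir,
// mirroring the "no CNI configuration file" condition from the log.
func cniConfigPresent(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // illustrative libcni-style extensions
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := cniConfigPresent("/etc/kubernetes/cni/net.d")
	if err != nil {
		fmt.Println("probe error:", err)
		return
	}
	if !ok {
		fmt.Println("NetworkReady=false: no CNI configuration file. Has your network provider started?")
	}
}
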
pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.323228 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.323284 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.323306 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.323339 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.323364 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:07Z","lastTransitionTime":"2025-11-28T13:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.427040 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.427144 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.427233 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.427406 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.427446 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:07Z","lastTransitionTime":"2025-11-28T13:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.491078 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs\") pod \"network-metrics-daemon-jspn8\" (UID: \"9ab9b94a-66a7-4d68-8046-d6d97595330d\") " pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:07 crc kubenswrapper[4857]: E1128 13:19:07.491285 4857 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:19:07 crc kubenswrapper[4857]: E1128 13:19:07.491390 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs podName:9ab9b94a-66a7-4d68-8046-d6d97595330d nodeName:}" failed. No retries permitted until 2025-11-28 13:19:11.491362127 +0000 UTC m=+43.518737334 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs") pod "network-metrics-daemon-jspn8" (UID: "9ab9b94a-66a7-4d68-8046-d6d97595330d") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.530835 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.530892 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.530912 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.530938 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.530955 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:07Z","lastTransitionTime":"2025-11-28T13:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.633592 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.633667 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.633690 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.633719 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.633737 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:07Z","lastTransitionTime":"2025-11-28T13:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.737237 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.737294 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.737305 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.737323 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.737336 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:07Z","lastTransitionTime":"2025-11-28T13:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.839625 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.839656 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.839665 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.839677 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.839686 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:07Z","lastTransitionTime":"2025-11-28T13:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.942674 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.942812 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.942832 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.942856 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:07 crc kubenswrapper[4857]: I1128 13:19:07.942874 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:07Z","lastTransitionTime":"2025-11-28T13:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.045689 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.045729 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.045741 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.045805 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.045817 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:08Z","lastTransitionTime":"2025-11-28T13:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.148999 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.149065 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.149084 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.149108 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.149125 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:08Z","lastTransitionTime":"2025-11-28T13:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.253373 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.253455 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.253479 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.253511 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.253535 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:08Z","lastTransitionTime":"2025-11-28T13:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.335954 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.353393 4857 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-jspn8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ab9b94a-66a7-4d68-8046-d6d97595330d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jspn8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.355659 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.355699 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.355710 4857 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.355725 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.355737 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:08Z","lastTransitionTime":"2025-11-28T13:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.368323 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.391599 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.406623 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.420331 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.445107 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b83
9750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.458251 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.458998 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.459050 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.459083 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.459099 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:08Z","lastTransitionTime":"2025-11-28T13:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.460783 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.474001 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1de596ae-343e-4839-b049-61fb6b8fe7c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d95a5273a98c54e77a7eacddf2dd80d3b844b0e91f4acea5b62956781ed7a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40364e12e5cb85f33d248ca158dabde6c73b2f1b58e4d3c95f454f0bce166c89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7mglc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 28 
13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.487162 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.507964 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b9dff614e01fbce60f50ab11f5c6163da77cf63
0ef0fbaee81db45fa15b1be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://587eabbf22cb05ef5448589737f83397316b62a2513f9274eec3a219ca48183d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:00Z\\\",\\\"message\\\":\\\"xternalversions/factory.go:140\\\\nI1128 13:18:59.192030 6168 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 13:18:59.192068 6168 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 13:18:59.192223 6168 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 13:18:59.192523 6168 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 13:18:59.192615 6168 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1128 13:18:59.192708 6168 reflector.go:311] Stopping reflector *v1.ClusterUserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 13:18:59.193171 6168 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b9dff614e01fbce60f50ab11f5c6163da77cf630ef0fbaee81db45fa15b1be0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"message\\\":\\\"3:19:02.335402 6308 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 13:19:02.335415 6308 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 13:19:02.335420 6308 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 13:19:02.335432 6308 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 13:19:02.335440 6308 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 13:19:02.335473 6308 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 13:19:02.335477 6308 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 13:19:02.335496 6308 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 13:19:02.335502 6308 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 13:19:02.335524 6308 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 13:19:02.335533 6308 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 13:19:02.335524 6308 factory.go:656] Stopping watch factory\\\\nI1128 13:19:02.335545 6308 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 13:19:02.335557 6308 ovnkube.go:599] Stopped ovnkube\\\\nI1128 
13:19:02.335525 6308 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 13:19:02.335595 6308 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 13\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://
6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.520027 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.530663 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.542356 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.552984 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.560994 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.561051 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.561076 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.561105 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.561128 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:08Z","lastTransitionTime":"2025-11-28T13:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.565272 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.576421 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.664224 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.664273 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.664285 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.664302 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.664340 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:08Z","lastTransitionTime":"2025-11-28T13:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.766899 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.766970 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.766994 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.767036 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.767063 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:08Z","lastTransitionTime":"2025-11-28T13:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.870837 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.870898 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.870911 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.870928 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.870939 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:08Z","lastTransitionTime":"2025-11-28T13:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.974799 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.974901 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.974927 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.974960 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:08 crc kubenswrapper[4857]: I1128 13:19:08.974981 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:08Z","lastTransitionTime":"2025-11-28T13:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.078023 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.078092 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.078107 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.078132 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.078150 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:09Z","lastTransitionTime":"2025-11-28T13:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.181673 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.181737 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.181790 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.181817 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.181835 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:09Z","lastTransitionTime":"2025-11-28T13:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.285321 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.285423 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.285445 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.285502 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.285524 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:09Z","lastTransitionTime":"2025-11-28T13:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.309275 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.309288 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.309307 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.309307 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:09 crc kubenswrapper[4857]: E1128 13:19:09.309703 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:19:09 crc kubenswrapper[4857]: E1128 13:19:09.309864 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:19:09 crc kubenswrapper[4857]: E1128 13:19:09.309986 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:19:09 crc kubenswrapper[4857]: E1128 13:19:09.310169 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.387842 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.387920 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.387932 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.387949 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.387971 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:09Z","lastTransitionTime":"2025-11-28T13:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.491622 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.491683 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.491700 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.491723 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.491740 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:09Z","lastTransitionTime":"2025-11-28T13:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.595246 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.595288 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.595298 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.595313 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.595324 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:09Z","lastTransitionTime":"2025-11-28T13:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.697831 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.697901 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.697925 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.697955 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.697977 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:09Z","lastTransitionTime":"2025-11-28T13:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.800113 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.800183 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.800202 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.800229 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.800249 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:09Z","lastTransitionTime":"2025-11-28T13:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.903013 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.903051 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.903062 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.903078 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:09 crc kubenswrapper[4857]: I1128 13:19:09.903094 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:09Z","lastTransitionTime":"2025-11-28T13:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.005473 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.005529 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.005543 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.005568 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.005586 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:10Z","lastTransitionTime":"2025-11-28T13:19:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.108603 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.108657 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.108669 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.108685 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.108697 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:10Z","lastTransitionTime":"2025-11-28T13:19:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.211708 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.211785 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.211797 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.211813 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.211827 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:10Z","lastTransitionTime":"2025-11-28T13:19:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.313983 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.314051 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.314068 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.314094 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.314107 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:10Z","lastTransitionTime":"2025-11-28T13:19:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.418696 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.419043 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.419156 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.419318 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.419452 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:10Z","lastTransitionTime":"2025-11-28T13:19:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.523426 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.523496 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.523515 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.523549 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.523569 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:10Z","lastTransitionTime":"2025-11-28T13:19:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.627289 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.627362 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.627386 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.627421 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.627443 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:10Z","lastTransitionTime":"2025-11-28T13:19:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.731126 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.731198 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.731220 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.731253 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.731274 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:10Z","lastTransitionTime":"2025-11-28T13:19:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.834245 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.834315 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.834334 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.834360 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.834377 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:10Z","lastTransitionTime":"2025-11-28T13:19:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.937018 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.937061 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.937069 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.937083 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:10 crc kubenswrapper[4857]: I1128 13:19:10.937093 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:10Z","lastTransitionTime":"2025-11-28T13:19:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.039947 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.040009 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.040026 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.040049 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.040068 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:11Z","lastTransitionTime":"2025-11-28T13:19:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.143523 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.143575 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.143593 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.143618 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.143636 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:11Z","lastTransitionTime":"2025-11-28T13:19:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.247300 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.247380 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.247405 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.247435 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.247457 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:11Z","lastTransitionTime":"2025-11-28T13:19:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.309122 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:11 crc kubenswrapper[4857]: E1128 13:19:11.309299 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.309345 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.309361 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:11 crc kubenswrapper[4857]: E1128 13:19:11.309537 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.309361 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:11 crc kubenswrapper[4857]: E1128 13:19:11.309618 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:19:11 crc kubenswrapper[4857]: E1128 13:19:11.309687 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.350335 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.350400 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.350418 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.350445 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.350463 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:11Z","lastTransitionTime":"2025-11-28T13:19:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.453136 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.453169 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.453181 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.453196 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.453208 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:11Z","lastTransitionTime":"2025-11-28T13:19:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.537873 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs\") pod \"network-metrics-daemon-jspn8\" (UID: \"9ab9b94a-66a7-4d68-8046-d6d97595330d\") " pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:11 crc kubenswrapper[4857]: E1128 13:19:11.538082 4857 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:19:11 crc kubenswrapper[4857]: E1128 13:19:11.538138 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs podName:9ab9b94a-66a7-4d68-8046-d6d97595330d nodeName:}" failed. No retries permitted until 2025-11-28 13:19:19.538123171 +0000 UTC m=+51.565498328 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs") pod "network-metrics-daemon-jspn8" (UID: "9ab9b94a-66a7-4d68-8046-d6d97595330d") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.556087 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.556126 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.556138 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.556154 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.556165 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:11Z","lastTransitionTime":"2025-11-28T13:19:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.659555 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.659615 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.659650 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.659689 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.659712 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:11Z","lastTransitionTime":"2025-11-28T13:19:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.762741 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.762833 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.762852 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.762878 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.762896 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:11Z","lastTransitionTime":"2025-11-28T13:19:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.768474 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.768526 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.768542 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.768567 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.768580 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:11Z","lastTransitionTime":"2025-11-28T13:19:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:11 crc kubenswrapper[4857]: E1128 13:19:11.788015 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:11Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.793009 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.793071 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.793089 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.793113 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.793130 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:11Z","lastTransitionTime":"2025-11-28T13:19:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:11 crc kubenswrapper[4857]: E1128 13:19:11.811230 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:11Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.816287 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.816380 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
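Every "Error updating node status, will retry" entry in this window fails for the same reason: the node.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 is serving a certificate that expired on 2025-08-24T17:21:41Z, long before the node's current time of 2025-11-28T13:19:11Z, so the API server cannot validate the webhook's certificate and every status patch the kubelet sends comes back as an Internal error. A minimal Go sketch (a hypothetical diagnostic, not part of the kubelet) that would confirm the validity window of the certificate the webhook actually serves:

    package main

    // Connects to the webhook endpoint named in the log and prints the
    // validity window of the certificate it presents. InsecureSkipVerify is
    // needed precisely because the certificate is expired; the goal is to
    // inspect it, not to trust it.
    import (
        "crypto/tls"
        "fmt"
        "log"
        "time"
    )

    func main() {
        // Address taken from the failing call in the log:
        // Post "https://127.0.0.1:9743/node?timeout=10s"
        conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()
        for _, cert := range conn.ConnectionState().PeerCertificates {
            // For the failure above we would expect NotAfter = 2025-08-24T17:21:41Z.
            fmt.Printf("subject=%q notBefore=%s notAfter=%s\n",
                cert.Subject.String(),
                cert.NotBefore.Format(time.RFC3339),
                cert.NotAfter.Format(time.RFC3339))
        }
    }

Until that certificate is rotated, the patch can never succeed, which is why the same error keeps recurring in this log.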
event="NodeHasNoDiskPressure" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.816398 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.816505 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.816532 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:11Z","lastTransitionTime":"2025-11-28T13:19:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:11 crc kubenswrapper[4857]: E1128 13:19:11.837454 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:11Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.842724 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.842875 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
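The NodeNotReady and "Node became not ready" records that dominate this section all carry one root cause: the container runtime reports NetworkReady=false because no CNI configuration file exists in /etc/kubernetes/cni/net.d/ yet, and the kubelet holds the node's Ready condition at False until one appears. As a rough Go sketch of that gate (assuming the conventional libcni file extensions; the real CRI-O/kubelet check is more involved):

    package main

    // Scans the CNI configuration directory the way a network-readiness
    // check conceptually does: no usable config file means NetworkReady=false.
    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        confDir := "/etc/kubernetes/cni/net.d" // directory named in the log message
        var found []string
        // .conf, .conflist and .json are the extensions libcni conventionally loads.
        for _, pattern := range []string{"*.conf", "*.conflist", "*.json"} {
            matches, err := filepath.Glob(filepath.Join(confDir, pattern))
            if err != nil {
                fmt.Fprintln(os.Stderr, err)
                os.Exit(1)
            }
            found = append(found, matches...)
        }
        if len(found) == 0 {
            // The state this log is stuck in: the network plugin has not
            // written its configuration yet, so the node stays NotReady.
            fmt.Println("no CNI configuration file in", confDir)
            os.Exit(1)
        }
        fmt.Println("CNI configs:", found)
    }

Once the network provider's pods start and write a configuration into that directory, the readiness condition should flip and these repeated NotReady records stop.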
event="NodeHasNoDiskPressure" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.842903 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.842938 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.842964 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:11Z","lastTransitionTime":"2025-11-28T13:19:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:11 crc kubenswrapper[4857]: E1128 13:19:11.867087 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:11Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.872368 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.872473 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.872544 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.872577 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.872597 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:11Z","lastTransitionTime":"2025-11-28T13:19:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:11 crc kubenswrapper[4857]: E1128 13:19:11.894536 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:11Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:11 crc kubenswrapper[4857]: E1128 13:19:11.894792 4857 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.897236 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.897555 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.897644 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.897670 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:11 crc kubenswrapper[4857]: I1128 13:19:11.897688 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:11Z","lastTransitionTime":"2025-11-28T13:19:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.001802 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.001867 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.001892 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.001922 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.001948 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:12Z","lastTransitionTime":"2025-11-28T13:19:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.105515 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.105578 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.105598 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.105623 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.105650 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:12Z","lastTransitionTime":"2025-11-28T13:19:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.209026 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.209094 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.209120 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.209150 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.209175 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:12Z","lastTransitionTime":"2025-11-28T13:19:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.313272 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.313322 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.313339 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.313363 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.313380 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:12Z","lastTransitionTime":"2025-11-28T13:19:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.416301 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.416348 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.416360 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.416396 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.416409 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:12Z","lastTransitionTime":"2025-11-28T13:19:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.519358 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.519419 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.519436 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.519461 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.519479 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:12Z","lastTransitionTime":"2025-11-28T13:19:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.622718 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.622792 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.622807 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.622831 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.622876 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:12Z","lastTransitionTime":"2025-11-28T13:19:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.726369 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.726431 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.726440 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.726455 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.726465 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:12Z","lastTransitionTime":"2025-11-28T13:19:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.829380 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.829441 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.829464 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.829495 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.829517 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:12Z","lastTransitionTime":"2025-11-28T13:19:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.932740 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.932824 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.932838 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.932864 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:12 crc kubenswrapper[4857]: I1128 13:19:12.932878 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:12Z","lastTransitionTime":"2025-11-28T13:19:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.035235 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.035282 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.035297 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.035318 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.035332 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:13Z","lastTransitionTime":"2025-11-28T13:19:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.138173 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.138795 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.138885 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.138981 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.139125 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:13Z","lastTransitionTime":"2025-11-28T13:19:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.242672 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.242736 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.242789 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.242818 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.242839 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:13Z","lastTransitionTime":"2025-11-28T13:19:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.309208 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.309271 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:13 crc kubenswrapper[4857]: E1128 13:19:13.309406 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.309236 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:13 crc kubenswrapper[4857]: E1128 13:19:13.309736 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.309807 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:13 crc kubenswrapper[4857]: E1128 13:19:13.309892 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:19:13 crc kubenswrapper[4857]: E1128 13:19:13.310046 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.345148 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.345174 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.345182 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.345195 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.345204 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:13Z","lastTransitionTime":"2025-11-28T13:19:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.448909 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.448954 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.448967 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.448986 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.448999 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:13Z","lastTransitionTime":"2025-11-28T13:19:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.551303 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.551353 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.551364 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.551381 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.551394 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:13Z","lastTransitionTime":"2025-11-28T13:19:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.655016 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.655065 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.655083 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.655105 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.655122 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:13Z","lastTransitionTime":"2025-11-28T13:19:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.758126 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.758390 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.758519 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.758609 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.758695 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:13Z","lastTransitionTime":"2025-11-28T13:19:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.861994 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.862063 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.862144 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.862176 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.862199 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:13Z","lastTransitionTime":"2025-11-28T13:19:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.965679 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.965734 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.965788 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.965836 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:13 crc kubenswrapper[4857]: I1128 13:19:13.965866 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:13Z","lastTransitionTime":"2025-11-28T13:19:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.068535 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.068610 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.068631 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.068660 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.068680 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:14Z","lastTransitionTime":"2025-11-28T13:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.172088 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.172193 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.172218 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.172245 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.172265 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:14Z","lastTransitionTime":"2025-11-28T13:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.275197 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.275251 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.275268 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.275292 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.275309 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:14Z","lastTransitionTime":"2025-11-28T13:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.378958 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.379030 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.379058 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.379089 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.379113 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:14Z","lastTransitionTime":"2025-11-28T13:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.482262 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.482618 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.482851 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.483100 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.483274 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:14Z","lastTransitionTime":"2025-11-28T13:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.586745 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.587155 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.587324 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.587451 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.587574 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:14Z","lastTransitionTime":"2025-11-28T13:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.690325 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.690382 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.690401 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.690429 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.690446 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:14Z","lastTransitionTime":"2025-11-28T13:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.793836 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.794203 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.794341 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.794523 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.794677 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:14Z","lastTransitionTime":"2025-11-28T13:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.898353 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.898721 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.898897 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.899057 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:14 crc kubenswrapper[4857]: I1128 13:19:14.899181 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:14Z","lastTransitionTime":"2025-11-28T13:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.003040 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.003090 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.003108 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.003129 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.003145 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:15Z","lastTransitionTime":"2025-11-28T13:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.105508 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.105585 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.105606 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.105641 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.105664 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:15Z","lastTransitionTime":"2025-11-28T13:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.208671 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.208713 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.208724 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.208740 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.208776 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:15Z","lastTransitionTime":"2025-11-28T13:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.309361 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.309422 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.309441 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.309361 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:15 crc kubenswrapper[4857]: E1128 13:19:15.309553 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:19:15 crc kubenswrapper[4857]: E1128 13:19:15.309657 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:19:15 crc kubenswrapper[4857]: E1128 13:19:15.309799 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:19:15 crc kubenswrapper[4857]: E1128 13:19:15.309898 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.313138 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.313177 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.313192 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.313210 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.313224 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:15Z","lastTransitionTime":"2025-11-28T13:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.415941 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.416308 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.416506 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.416680 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.416897 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:15Z","lastTransitionTime":"2025-11-28T13:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.519663 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.519712 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.519727 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.519745 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.519777 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:15Z","lastTransitionTime":"2025-11-28T13:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.622968 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.623033 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.623057 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.623097 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.623120 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:15Z","lastTransitionTime":"2025-11-28T13:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.726323 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.726382 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.726401 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.726427 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.726444 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:15Z","lastTransitionTime":"2025-11-28T13:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.829684 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.829745 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.829828 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.829856 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.829873 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:15Z","lastTransitionTime":"2025-11-28T13:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.933669 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.933721 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.933735 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.933788 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:15 crc kubenswrapper[4857]: I1128 13:19:15.933803 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:15Z","lastTransitionTime":"2025-11-28T13:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.037151 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.037222 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.037278 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.037308 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.037332 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:16Z","lastTransitionTime":"2025-11-28T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.141352 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.141739 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.141867 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.141963 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.142057 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:16Z","lastTransitionTime":"2025-11-28T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.245484 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.245530 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.245540 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.245555 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.245568 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:16Z","lastTransitionTime":"2025-11-28T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.310393 4857 scope.go:117] "RemoveContainer" containerID="6b9dff614e01fbce60f50ab11f5c6163da77cf630ef0fbaee81db45fa15b1be0" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.328399 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.348824 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.350067 4857 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.350117 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.350136 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.350160 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.350174 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:16Z","lastTransitionTime":"2025-11-28T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.369009 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin
\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.388422 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.403729 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.422973 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.434127 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jspn8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ab9b94a-66a7-4d68-8046-d6d97595330d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jspn8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.448738 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.452926 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.452979 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.452996 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.453019 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.453037 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:16Z","lastTransitionTime":"2025-11-28T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.468488 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.483453 4857 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.496763 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.508122 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.540500 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b83
9750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.555739 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.555785 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.555795 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.555808 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.555816 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:16Z","lastTransitionTime":"2025-11-28T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.560115 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.572054 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.587736 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b9dff614e01fbce60f50ab11f5c6163da77cf63
0ef0fbaee81db45fa15b1be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b9dff614e01fbce60f50ab11f5c6163da77cf630ef0fbaee81db45fa15b1be0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"message\\\":\\\"3:19:02.335402 6308 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 13:19:02.335415 6308 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 13:19:02.335420 6308 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 13:19:02.335432 6308 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 13:19:02.335440 6308 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 13:19:02.335473 6308 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 13:19:02.335477 6308 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 13:19:02.335496 6308 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 13:19:02.335502 6308 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 13:19:02.335524 6308 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 13:19:02.335533 6308 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 13:19:02.335524 6308 factory.go:656] Stopping watch factory\\\\nI1128 13:19:02.335545 6308 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 13:19:02.335557 6308 ovnkube.go:599] Stopped ovnkube\\\\nI1128 13:19:02.335525 6308 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 13:19:02.335595 6308 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 13\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:19:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-w25ss_openshift-ovn-kubernetes(bf74e995-2208-43c6-b89d-10318f55cda8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.596984 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1de596ae-343e-4839-b049-61fb6b8fe7c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d95a5273a98c54e77a7eacddf2dd80d3b844b0e91f4acea5b62956781ed7a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40364e12e5cb85f33d248ca158dabde6c73b2f1b58e4d3c95f454f0bce166c89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7mglc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.658658 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.658723 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.658740 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.658782 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.658804 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:16Z","lastTransitionTime":"2025-11-28T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.680528 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w25ss_bf74e995-2208-43c6-b89d-10318f55cda8/ovnkube-controller/1.log" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.684006 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerStarted","Data":"ba9de00059ffcc54d321d43ddbc0ee9af3991cb33e25efcbef95c79e0b983f3a"} Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.684701 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.703342 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a891
2cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.726863 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9de00059ffcc54d321d43ddbc0ee9af3991cb33e25efcbef95c79e0b983f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b9dff614e01fbce60f50ab11f5c6163da77cf630ef0fbaee81db45fa15b1be0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"message\\\":\\\"3:19:02.335402 6308 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 13:19:02.335415 6308 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 13:19:02.335420 6308 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 13:19:02.335432 6308 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 13:19:02.335440 6308 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 13:19:02.335473 6308 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 13:19:02.335477 6308 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 13:19:02.335496 6308 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 13:19:02.335502 6308 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 13:19:02.335524 6308 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 13:19:02.335533 6308 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 13:19:02.335524 6308 factory.go:656] Stopping watch factory\\\\nI1128 13:19:02.335545 6308 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 13:19:02.335557 6308 ovnkube.go:599] Stopped ovnkube\\\\nI1128 13:19:02.335525 6308 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 13:19:02.335595 6308 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 
13\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:19:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.741101 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1de596ae-343e-4839-b049-61fb6b8fe7c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d95a5273a98c54e77a7eacddf2dd80d3b844b0e91f4acea5b62956781ed7a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40364e12e5cb85f33d248ca158dabde6c73b2f1b58e4d3c95f454f0bce166c89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7mglc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 
13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.754315 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.760999 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.761036 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.761047 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.761064 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.761078 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:16Z","lastTransitionTime":"2025-11-28T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.770670 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\
\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.784374 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.797627 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.817051 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.833200 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.850042 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.863986 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.864028 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.864039 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.864088 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.864101 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:16Z","lastTransitionTime":"2025-11-28T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.864940 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.874513 4857 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-jspn8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ab9b94a-66a7-4d68-8046-d6d97595330d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jspn8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.884804 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.895169 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.917500 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b83
9750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.930424 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707
d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of 
insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.941492 4857 status_manager.go:875] "Failed 
to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:16Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.966169 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.966203 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 
13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.966211 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.966225 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:16 crc kubenswrapper[4857]: I1128 13:19:16.966236 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:16Z","lastTransitionTime":"2025-11-28T13:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.068395 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.068442 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.068455 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.068473 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.068485 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:17Z","lastTransitionTime":"2025-11-28T13:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.171393 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.171430 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.171438 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.171452 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.171462 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:17Z","lastTransitionTime":"2025-11-28T13:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.273230 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.273274 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.273285 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.273303 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.273312 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:17Z","lastTransitionTime":"2025-11-28T13:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.309046 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.309177 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.309272 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:17 crc kubenswrapper[4857]: E1128 13:19:17.309265 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.309338 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:17 crc kubenswrapper[4857]: E1128 13:19:17.309483 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:17 crc kubenswrapper[4857]: E1128 13:19:17.309638 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:19:17 crc kubenswrapper[4857]: E1128 13:19:17.309744 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.376275 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.376348 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.376370 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.376396 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.376415 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:17Z","lastTransitionTime":"2025-11-28T13:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.507028 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.507076 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.507089 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.507107 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.507119 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:17Z","lastTransitionTime":"2025-11-28T13:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.619161 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.619309 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.619362 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.619391 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.619410 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:17Z","lastTransitionTime":"2025-11-28T13:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.722898 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.722961 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.723004 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.723034 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.723057 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:17Z","lastTransitionTime":"2025-11-28T13:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.826066 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.826139 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.826162 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.826190 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.826215 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:17Z","lastTransitionTime":"2025-11-28T13:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.929248 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.929304 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.929322 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.929345 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:17 crc kubenswrapper[4857]: I1128 13:19:17.929362 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:17Z","lastTransitionTime":"2025-11-28T13:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.031813 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.031887 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.031906 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.031931 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.031952 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:18Z","lastTransitionTime":"2025-11-28T13:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.135375 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.135448 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.135471 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.135517 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.135542 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:18Z","lastTransitionTime":"2025-11-28T13:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.238880 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.238940 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.238957 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.238983 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.239001 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:18Z","lastTransitionTime":"2025-11-28T13:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.334469 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name
\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.340987 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.341037 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.341055 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.341080 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.341100 4857 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:18Z","lastTransitionTime":"2025-11-28T13:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.358933 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"nam
e\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/
serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9de00059ffcc54d321d43ddbc0ee9af3991cb33e25efcbef95c79e0b983f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b9dff614e01fbce60f50ab11f5c6163da77cf630ef0fbaee81db45fa15b1be0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"message\\\":\\\"3:19:02.335402 6308 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 13:19:02.335415 6308 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 13:19:02.335420 6308 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 13:19:02.335432 6308 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 13:19:02.335440 6308 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 13:19:02.335473 6308 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 13:19:02.335477 6308 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 13:19:02.335496 6308 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 13:19:02.335502 6308 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 13:19:02.335524 6308 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 13:19:02.335533 6308 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 13:19:02.335524 6308 factory.go:656] Stopping watch factory\\\\nI1128 13:19:02.335545 6308 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 13:19:02.335557 6308 ovnkube.go:599] Stopped ovnkube\\\\nI1128 13:19:02.335525 6308 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 13:19:02.335595 6308 
handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 13\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:19:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"1
92.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.370203 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1de596ae-343e-4839-b049-61fb6b8fe7c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d95a5273a98c54e77a7eacddf2dd80d3b844b0e91f4acea5b62956781ed7a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40364e12e5cb85f33d248ca158dabde6c73b2f1b58e4d3c95f454f0bce166c89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7mglc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 
13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.382480 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.402255 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.413137 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.426837 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.440725 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.443189 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.443241 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.443252 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.443271 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.443283 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:18Z","lastTransitionTime":"2025-11-28T13:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.452991 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.461985 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jspn8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ab9b94a-66a7-4d68-8046-d6d97595330d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jspn8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.472881 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.484891 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.495195 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.504922 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.514009 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.530028 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b83
9750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.542252 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707
d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of 
insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.545852 4857 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.545891 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.545900 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.545915 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.545924 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:18Z","lastTransitionTime":"2025-11-28T13:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.649276 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.649367 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.649387 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.649410 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.649427 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:18Z","lastTransitionTime":"2025-11-28T13:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.693715 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w25ss_bf74e995-2208-43c6-b89d-10318f55cda8/ovnkube-controller/2.log" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.694736 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w25ss_bf74e995-2208-43c6-b89d-10318f55cda8/ovnkube-controller/1.log" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.697699 4857 generic.go:334] "Generic (PLEG): container finished" podID="bf74e995-2208-43c6-b89d-10318f55cda8" containerID="ba9de00059ffcc54d321d43ddbc0ee9af3991cb33e25efcbef95c79e0b983f3a" exitCode=1 Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.697784 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerDied","Data":"ba9de00059ffcc54d321d43ddbc0ee9af3991cb33e25efcbef95c79e0b983f3a"} Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.697833 4857 scope.go:117] "RemoveContainer" containerID="6b9dff614e01fbce60f50ab11f5c6163da77cf630ef0fbaee81db45fa15b1be0" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.699044 4857 scope.go:117] "RemoveContainer" containerID="ba9de00059ffcc54d321d43ddbc0ee9af3991cb33e25efcbef95c79e0b983f3a" Nov 28 13:19:18 crc kubenswrapper[4857]: E1128 13:19:18.699312 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-w25ss_openshift-ovn-kubernetes(bf74e995-2208-43c6-b89d-10318f55cda8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.714385 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.726034 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.736127 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.752513 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.752562 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.752579 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.752604 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.752621 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:18Z","lastTransitionTime":"2025-11-28T13:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.758400 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":
0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b839750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termi
nated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.777005 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.791200 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.821251 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9de00059ffcc54d321d43ddbc0ee9af3991cb3
3e25efcbef95c79e0b983f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b9dff614e01fbce60f50ab11f5c6163da77cf630ef0fbaee81db45fa15b1be0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"message\\\":\\\"3:19:02.335402 6308 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 13:19:02.335415 6308 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 13:19:02.335420 6308 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 13:19:02.335432 6308 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 13:19:02.335440 6308 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 13:19:02.335473 6308 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 13:19:02.335477 6308 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 13:19:02.335496 6308 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 13:19:02.335502 6308 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 13:19:02.335524 6308 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 13:19:02.335533 6308 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 13:19:02.335524 6308 factory.go:656] Stopping watch factory\\\\nI1128 13:19:02.335545 6308 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 13:19:02.335557 6308 ovnkube.go:599] Stopped ovnkube\\\\nI1128 13:19:02.335525 6308 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 13:19:02.335595 6308 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 13\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:19:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9de00059ffcc54d321d43ddbc0ee9af3991cb33e25efcbef95c79e0b983f3a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:18Z\\\",\\\"message\\\":\\\"icPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.43],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1128 13:19:17.230096 6507 services_controller.go:434] Service openshift-console-operator/metrics retrieved from lister for network=default: \\\\u0026Service{ObjectMeta:{metrics openshift-console-operator e4559ce3-2d5a-470f-b8bf-4c8b054d2335 11843 0 2025-02-23 05:38:55 +0000 UTC \\\\u003cnil\\\\u003e \\\\u003cnil\\\\u003e map[name:console-operator] map[capability.openshift.io/name:Console include.release.openshift.io/hypershift:true include.release.openshift.io/ibm-cloud-managed:true include.release.openshift.io/self-managed-high-availability:true include.release.openshift.io/single-node-developer:true service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:serving-cert 
service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0003197af \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:19:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.840476 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1de596ae-343e-4839-b049-61fb6b8fe7c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d95a5273a98c54e77a7eacddf2dd80d3b844b0e91f4acea5b62956781ed7a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40364e12e5cb85f33d248ca158dabde6c73b2f1b58e4d3c95f454f0bce166c89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7mglc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 
13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.850602 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.855570 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.855637 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.855658 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.855881 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.855920 4857 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:18Z","lastTransitionTime":"2025-11-28T13:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.862416 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.876823 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\
\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.890390 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.899693 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.910338 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.920423 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jspn8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ab9b94a-66a7-4d68-8046-d6d97595330d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jspn8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.932505 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.945867 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:18Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.958355 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.958587 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:18 crc 
kubenswrapper[4857]: I1128 13:19:18.958835 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.959021 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:18 crc kubenswrapper[4857]: I1128 13:19:18.959169 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:18Z","lastTransitionTime":"2025-11-28T13:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.061780 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.062017 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.062069 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.062098 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.062118 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:19Z","lastTransitionTime":"2025-11-28T13:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.165113 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.165187 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.165202 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.165218 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.165230 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:19Z","lastTransitionTime":"2025-11-28T13:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.269128 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.269824 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.269986 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.270162 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.270321 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:19Z","lastTransitionTime":"2025-11-28T13:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.309128 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.309297 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.309328 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:19 crc kubenswrapper[4857]: E1128 13:19:19.309458 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:19:19 crc kubenswrapper[4857]: E1128 13:19:19.309590 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.309728 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:19 crc kubenswrapper[4857]: E1128 13:19:19.310492 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:19:19 crc kubenswrapper[4857]: E1128 13:19:19.310874 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.373844 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.373909 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.373929 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.373995 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.374018 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:19Z","lastTransitionTime":"2025-11-28T13:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.431096 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.431444 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:19 crc kubenswrapper[4857]: E1128 13:19:19.431576 4857 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.431562 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:19 crc kubenswrapper[4857]: E1128 13:19:19.431663 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-11-28 13:19:51.43164096 +0000 UTC m=+83.459016157 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:19:19 crc kubenswrapper[4857]: E1128 13:19:19.432011 4857 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:19:19 crc kubenswrapper[4857]: E1128 13:19:19.432201 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:19:51.432133894 +0000 UTC m=+83.459509101 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:19:19 crc kubenswrapper[4857]: E1128 13:19:19.432525 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:19:51.432471194 +0000 UTC m=+83.459846401 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.477294 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.477599 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.477820 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.477982 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.478126 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:19Z","lastTransitionTime":"2025-11-28T13:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.532455 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:19:19 crc kubenswrapper[4857]: E1128 13:19:19.533037 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 28 13:19:19 crc kubenswrapper[4857]: E1128 13:19:19.533093 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 28 13:19:19 crc kubenswrapper[4857]: E1128 13:19:19.533115 4857 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 13:19:19 crc kubenswrapper[4857]: E1128 13:19:19.533204 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 13:19:51.533177072 +0000 UTC m=+83.560552279 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.533038 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 13:19:19 crc kubenswrapper[4857]: E1128 13:19:19.533902 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 28 13:19:19 crc kubenswrapper[4857]: E1128 13:19:19.534105 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 28 13:19:19 crc kubenswrapper[4857]: E1128 13:19:19.534266 4857 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 13:19:19 crc kubenswrapper[4857]: E1128 13:19:19.534576 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 13:19:51.534543251 +0000 UTC m=+83.561918458 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.580969 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.581409 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.581551 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.581690 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.581860 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:19Z","lastTransitionTime":"2025-11-28T13:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.634663 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs\") pod \"network-metrics-daemon-jspn8\" (UID: \"9ab9b94a-66a7-4d68-8046-d6d97595330d\") " pod="openshift-multus/network-metrics-daemon-jspn8"
Nov 28 13:19:19 crc kubenswrapper[4857]: E1128 13:19:19.634900 4857 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 28 13:19:19 crc kubenswrapper[4857]: E1128 13:19:19.634995 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs podName:9ab9b94a-66a7-4d68-8046-d6d97595330d nodeName:}" failed. No retries permitted until 2025-11-28 13:19:35.634974322 +0000 UTC m=+67.662349499 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs") pod "network-metrics-daemon-jspn8" (UID: "9ab9b94a-66a7-4d68-8046-d6d97595330d") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.684697 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.684823 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.684845 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.684867 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.684884 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:19Z","lastTransitionTime":"2025-11-28T13:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.788611 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.788661 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.788679 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.788704 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.788721 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:19Z","lastTransitionTime":"2025-11-28T13:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.892499 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.892545 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.892558 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.892574 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.892586 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:19Z","lastTransitionTime":"2025-11-28T13:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.996253 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.996296 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.996310 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.996330 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:19 crc kubenswrapper[4857]: I1128 13:19:19.996346 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:19Z","lastTransitionTime":"2025-11-28T13:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.099876 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.099937 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.099956 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.099979 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.099996 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:20Z","lastTransitionTime":"2025-11-28T13:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.204129 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.204186 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.204205 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.204230 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.204249 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:20Z","lastTransitionTime":"2025-11-28T13:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.306888 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.307215 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.307244 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.307268 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.307282 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:20Z","lastTransitionTime":"2025-11-28T13:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.409660 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.409712 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.409730 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.409800 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.409831 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:20Z","lastTransitionTime":"2025-11-28T13:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.513521 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.513595 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.513655 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.513686 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.513713 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:20Z","lastTransitionTime":"2025-11-28T13:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.619690 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.619804 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.619832 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.619862 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.619885 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:20Z","lastTransitionTime":"2025-11-28T13:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.707773 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w25ss_bf74e995-2208-43c6-b89d-10318f55cda8/ovnkube-controller/2.log"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.721513 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.721543 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.721551 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.721565 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.721576 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:20Z","lastTransitionTime":"2025-11-28T13:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.824464 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.824500 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.824510 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.824523 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.824532 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:20Z","lastTransitionTime":"2025-11-28T13:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.927584 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.927656 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.927674 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.927700 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:20 crc kubenswrapper[4857]: I1128 13:19:20.927719 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:20Z","lastTransitionTime":"2025-11-28T13:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.031103 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.031203 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.031225 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.031256 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.031277 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:21Z","lastTransitionTime":"2025-11-28T13:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.134187 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.134580 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.134826 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.135091 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.135279 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:21Z","lastTransitionTime":"2025-11-28T13:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.237931 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.237971 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.237979 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.237995 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.238004 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:21Z","lastTransitionTime":"2025-11-28T13:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.309170 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:19:21 crc kubenswrapper[4857]: E1128 13:19:21.309356 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.309378 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.309193 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 13:19:21 crc kubenswrapper[4857]: E1128 13:19:21.309627 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 13:19:21 crc kubenswrapper[4857]: E1128 13:19:21.309803 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.309211 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8"
Nov 28 13:19:21 crc kubenswrapper[4857]: E1128 13:19:21.310119 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.341020 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.341098 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.341120 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.341148 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.341170 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:21Z","lastTransitionTime":"2025-11-28T13:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.444651 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.444709 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.444726 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.444779 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.444798 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:21Z","lastTransitionTime":"2025-11-28T13:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.548250 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.548308 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.548328 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.548358 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.548375 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:21Z","lastTransitionTime":"2025-11-28T13:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.651703 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.652148 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.652366 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.652576 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.652796 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:21Z","lastTransitionTime":"2025-11-28T13:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.733691 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.748074 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.752508 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.756308 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.756361 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.756373 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.756395 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.756409 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:21Z","lastTransitionTime":"2025-11-28T13:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.770499 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.796818 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b83
9750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.817487 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707
d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of 
insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.839050 4857 status_manager.go:875] "Failed 
to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.859446 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.859784 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 
13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.859883 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.860004 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.860104 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:21Z","lastTransitionTime":"2025-11-28T13:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.861720 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\
"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.883609 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9de00059ffcc54d321d43ddbc0ee9af3991cb33e25efcbef95c79e0b983f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b9dff614e01fbce60f50ab11f5c6163da77cf630ef0fbaee81db45fa15b1be0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"message\\\":\\\"3:19:02.335402 6308 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 13:19:02.335415 6308 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 13:19:02.335420 6308 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 13:19:02.335432 6308 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 13:19:02.335440 6308 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 13:19:02.335473 6308 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 13:19:02.335477 6308 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 13:19:02.335496 6308 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 13:19:02.335502 6308 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 13:19:02.335524 6308 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 13:19:02.335533 6308 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 13:19:02.335524 6308 factory.go:656] Stopping watch factory\\\\nI1128 13:19:02.335545 6308 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 13:19:02.335557 6308 ovnkube.go:599] Stopped ovnkube\\\\nI1128 13:19:02.335525 6308 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 13:19:02.335595 6308 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 13\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:19:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9de00059ffcc54d321d43ddbc0ee9af3991cb33e25efcbef95c79e0b983f3a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:18Z\\\",\\\"message\\\":\\\"icPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.43],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1128 13:19:17.230096 6507 services_controller.go:434] Service openshift-console-operator/metrics retrieved from lister for network=default: \\\\u0026Service{ObjectMeta:{metrics openshift-console-operator e4559ce3-2d5a-470f-b8bf-4c8b054d2335 11843 0 2025-02-23 05:38:55 +0000 UTC \\\\u003cnil\\\\u003e \\\\u003cnil\\\\u003e map[name:console-operator] map[capability.openshift.io/name:Console include.release.openshift.io/hypershift:true include.release.openshift.io/ibm-cloud-managed:true include.release.openshift.io/self-managed-high-availability:true 
include.release.openshift.io/single-node-developer:true service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:serving-cert service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0003197af \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:19:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/ru
n/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.894710 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1de596ae-343e-4839-b049-61fb6b8fe7c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d95a5273a98c54e77a7eacddf2dd80d3b844b0e91f4acea5b62956781ed7a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40364e12e5cb85f33d248ca158dabde6c73b2f1b58e4d3c95f454f0bce166c89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7mglc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 28 
13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.905994 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-28T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.916837 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168
.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.927608 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.938151 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.948257 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.956948 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.962292 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.962414 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.962485 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.962559 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.962619 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:21Z","lastTransitionTime":"2025-11-28T13:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.970190 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.984156 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:21 crc kubenswrapper[4857]: I1128 13:19:21.991906 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jspn8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ab9b94a-66a7-4d68-8046-d6d97595330d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jspn8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:21Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.069618 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.069691 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.069713 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.069741 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.069798 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:22Z","lastTransitionTime":"2025-11-28T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.096014 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.096051 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.096085 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.096101 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.096111 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:22Z","lastTransitionTime":"2025-11-28T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:22 crc kubenswrapper[4857]: E1128 13:19:22.108376 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:22Z is after 
2025-08-24T17:21:41Z" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.112067 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.112096 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.112114 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.112134 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.112164 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:22Z","lastTransitionTime":"2025-11-28T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:22 crc kubenswrapper[4857]: E1128 13:19:22.125876 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:22Z is after 
2025-08-24T17:21:41Z" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.129983 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.130184 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.130283 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.130407 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.130599 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:22Z","lastTransitionTime":"2025-11-28T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:22 crc kubenswrapper[4857]: E1128 13:19:22.145236 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:22Z is after 
2025-08-24T17:21:41Z" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.149795 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.149862 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.149887 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.149917 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.149939 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:22Z","lastTransitionTime":"2025-11-28T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:22 crc kubenswrapper[4857]: E1128 13:19:22.162656 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:22Z is after 
2025-08-24T17:21:41Z" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.166073 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.166099 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.166107 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.166119 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.166128 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:22Z","lastTransitionTime":"2025-11-28T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:22 crc kubenswrapper[4857]: E1128 13:19:22.176505 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:22Z is after 
2025-08-24T17:21:41Z"
Nov 28 13:19:22 crc kubenswrapper[4857]: E1128 13:19:22.176629 4857 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.177863 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.177890 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.177902 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.177916 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.177926 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:22Z","lastTransitionTime":"2025-11-28T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.280476 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.280564 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.280598 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.280625 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.280645 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:22Z","lastTransitionTime":"2025-11-28T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.383584 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.383626 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.383639 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.383657 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.383672 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:22Z","lastTransitionTime":"2025-11-28T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.485885 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.485947 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.485967 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.485991 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.486008 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:22Z","lastTransitionTime":"2025-11-28T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.589452 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.589507 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.589523 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.589561 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.589582 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:22Z","lastTransitionTime":"2025-11-28T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.693639 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.693705 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.693728 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.693782 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.693801 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:22Z","lastTransitionTime":"2025-11-28T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.797174 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.797234 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.797256 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.797302 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.797327 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:22Z","lastTransitionTime":"2025-11-28T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.900073 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.900141 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.900161 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.900188 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:22 crc kubenswrapper[4857]: I1128 13:19:22.900209 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:22Z","lastTransitionTime":"2025-11-28T13:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.003088 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.003135 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.003149 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.003171 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.003184 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:23Z","lastTransitionTime":"2025-11-28T13:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.105828 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.105865 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.105877 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.105892 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.105903 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:23Z","lastTransitionTime":"2025-11-28T13:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.209286 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.209356 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.209378 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.209408 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.209431 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:23Z","lastTransitionTime":"2025-11-28T13:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.309091 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.309224 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.309126 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.309252 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8"
Nov 28 13:19:23 crc kubenswrapper[4857]: E1128 13:19:23.309477 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 13:19:23 crc kubenswrapper[4857]: E1128 13:19:23.309639 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 13:19:23 crc kubenswrapper[4857]: E1128 13:19:23.309806 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 13:19:23 crc kubenswrapper[4857]: E1128 13:19:23.309986 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.312599 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.312662 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.312684 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.312711 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.312738 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:23Z","lastTransitionTime":"2025-11-28T13:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.421301 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.421369 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.421393 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.421417 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.421435 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:23Z","lastTransitionTime":"2025-11-28T13:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.525035 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.525102 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.525121 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.525147 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.525170 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:23Z","lastTransitionTime":"2025-11-28T13:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.628817 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.628887 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.628912 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.628941 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.628962 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:23Z","lastTransitionTime":"2025-11-28T13:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.731478 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.731934 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.732111 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.732288 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.732425 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:23Z","lastTransitionTime":"2025-11-28T13:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.836141 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.836194 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.836216 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.836245 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.836269 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:23Z","lastTransitionTime":"2025-11-28T13:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.939738 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.939838 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.939862 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.939890 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:23 crc kubenswrapper[4857]: I1128 13:19:23.939913 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:23Z","lastTransitionTime":"2025-11-28T13:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.043475 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.043542 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.043559 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.043607 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.043625 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:24Z","lastTransitionTime":"2025-11-28T13:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.145742 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.145908 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.145933 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.146800 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.146911 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:24Z","lastTransitionTime":"2025-11-28T13:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.250105 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.250153 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.250172 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.250195 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.250219 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:24Z","lastTransitionTime":"2025-11-28T13:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.353596 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.353638 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.353650 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.353668 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.353680 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:24Z","lastTransitionTime":"2025-11-28T13:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.456860 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.456925 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.456942 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.456965 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.456985 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:24Z","lastTransitionTime":"2025-11-28T13:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.560103 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.560219 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.560241 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.560269 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.560288 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:24Z","lastTransitionTime":"2025-11-28T13:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.663206 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.663254 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.663266 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.663283 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.663296 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:24Z","lastTransitionTime":"2025-11-28T13:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.766855 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.766965 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.767022 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.767045 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.767063 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:24Z","lastTransitionTime":"2025-11-28T13:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.870335 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.870396 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.870413 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.870437 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.870454 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:24Z","lastTransitionTime":"2025-11-28T13:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.973340 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.973401 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.973417 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.973440 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:24 crc kubenswrapper[4857]: I1128 13:19:24.973457 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:24Z","lastTransitionTime":"2025-11-28T13:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.076845 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.077005 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.077039 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.077136 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.077214 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:25Z","lastTransitionTime":"2025-11-28T13:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.181540 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.181616 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.181632 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.181684 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.181700 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:25Z","lastTransitionTime":"2025-11-28T13:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.284260 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.284321 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.284334 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.284370 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.284383 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:25Z","lastTransitionTime":"2025-11-28T13:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.309256 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.309352 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.309290 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.309273 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8"
Nov 28 13:19:25 crc kubenswrapper[4857]: E1128 13:19:25.309491 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 13:19:25 crc kubenswrapper[4857]: E1128 13:19:25.309653 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 13:19:25 crc kubenswrapper[4857]: E1128 13:19:25.309795 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 13:19:25 crc kubenswrapper[4857]: E1128 13:19:25.309974 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.387735 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.387852 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.387877 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.387907 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.387930 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:25Z","lastTransitionTime":"2025-11-28T13:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.491349 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.491417 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.491441 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.491465 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.491489 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:25Z","lastTransitionTime":"2025-11-28T13:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.594658 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.594725 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.594743 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.594833 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.594853 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:25Z","lastTransitionTime":"2025-11-28T13:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.698002 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.698058 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.698129 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.698154 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.698172 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:25Z","lastTransitionTime":"2025-11-28T13:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.801646 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.801732 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.801794 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.801864 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.801889 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:25Z","lastTransitionTime":"2025-11-28T13:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.904826 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.904880 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.904898 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.904921 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:25 crc kubenswrapper[4857]: I1128 13:19:25.904939 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:25Z","lastTransitionTime":"2025-11-28T13:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.008508 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.008567 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.008584 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.008607 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.008626 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:26Z","lastTransitionTime":"2025-11-28T13:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.111741 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.111912 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.111934 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.111961 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.111978 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:26Z","lastTransitionTime":"2025-11-28T13:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.216130 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.216217 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.216240 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.216271 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.216293 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:26Z","lastTransitionTime":"2025-11-28T13:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.319068 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.319126 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.319165 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.319192 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.319209 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:26Z","lastTransitionTime":"2025-11-28T13:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.422361 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.422427 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.422444 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.422468 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.422487 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:26Z","lastTransitionTime":"2025-11-28T13:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.526241 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.526309 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.526326 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.526348 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.526365 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:26Z","lastTransitionTime":"2025-11-28T13:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.629310 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.629381 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.629402 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.629430 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.629449 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:26Z","lastTransitionTime":"2025-11-28T13:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.731656 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.731705 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.731718 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.731735 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.731767 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:26Z","lastTransitionTime":"2025-11-28T13:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.835111 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.835154 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.835164 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.835183 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.835194 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:26Z","lastTransitionTime":"2025-11-28T13:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.937299 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.937364 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.937383 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.937408 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:26 crc kubenswrapper[4857]: I1128 13:19:26.937425 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:26Z","lastTransitionTime":"2025-11-28T13:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.040079 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.040241 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.040262 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.040285 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.040303 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:27Z","lastTransitionTime":"2025-11-28T13:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.143153 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.143220 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.143239 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.143263 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.143280 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:27Z","lastTransitionTime":"2025-11-28T13:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.246116 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.246463 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.246594 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.246728 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.246918 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:27Z","lastTransitionTime":"2025-11-28T13:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.308928 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.309039 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:27 crc kubenswrapper[4857]: E1128 13:19:27.309135 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.309243 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:27 crc kubenswrapper[4857]: E1128 13:19:27.309404 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:27 crc kubenswrapper[4857]: E1128 13:19:27.309533 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.308960 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:27 crc kubenswrapper[4857]: E1128 13:19:27.309692 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.350309 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.350348 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.350358 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.350373 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.350385 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:27Z","lastTransitionTime":"2025-11-28T13:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.452885 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.452960 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.452972 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.452992 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.453003 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:27Z","lastTransitionTime":"2025-11-28T13:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.555393 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.555641 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.555703 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.555792 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.555859 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:27Z","lastTransitionTime":"2025-11-28T13:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.659046 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.659122 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.659140 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.659164 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.659185 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:27Z","lastTransitionTime":"2025-11-28T13:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.762864 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.763315 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.763517 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.763660 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.763878 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:27Z","lastTransitionTime":"2025-11-28T13:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.867856 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.867951 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.867975 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.868006 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.868029 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:27Z","lastTransitionTime":"2025-11-28T13:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.971168 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.971573 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.971727 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.971995 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:27 crc kubenswrapper[4857]: I1128 13:19:27.972211 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:27Z","lastTransitionTime":"2025-11-28T13:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.075465 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.075528 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.075551 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.075579 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.075599 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:28Z","lastTransitionTime":"2025-11-28T13:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.179019 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.179093 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.179110 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.179134 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.179152 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:28Z","lastTransitionTime":"2025-11-28T13:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.281948 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.281982 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.281992 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.282004 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.282012 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:28Z","lastTransitionTime":"2025-11-28T13:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.323139 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.338072 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.349406 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jspn8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ab9b94a-66a7-4d68-8046-d6d97595330d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jspn8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.362171 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.374418 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e77ee052-7a38-4552-8c30-30bfebe79716\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6c67a288ed242cc97c05f9c8a01591c4ea3c3b8bb11e4e76d38bba7dd17f15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3e7e3b66ec5d45bfcbe5f4de7e21b540ba5bcc9859f3753465db8f992b731d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://953e18c7e26d1dbbd6f09ba86ce60483d35bd6bb271a76998acbc9e2d333a034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65aca5158444f4f1cfa23a526f33afe796d7c390a9d4007ec93a861a8399cb23\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65aca5158444f4f1cfa23a526f33afe796d7c390a9d4007ec93a861a8399cb23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.384007 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.384042 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.384053 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.384069 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.384081 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:28Z","lastTransitionTime":"2025-11-28T13:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.390352 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.404130 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.414697 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.435457 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b83
9750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.452285 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9de00059ffcc54d321d43ddbc0ee9af3991cb3
3e25efcbef95c79e0b983f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b9dff614e01fbce60f50ab11f5c6163da77cf630ef0fbaee81db45fa15b1be0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"message\\\":\\\"3:19:02.335402 6308 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 13:19:02.335415 6308 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 13:19:02.335420 6308 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 13:19:02.335432 6308 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 13:19:02.335440 6308 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 13:19:02.335473 6308 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 13:19:02.335477 6308 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 13:19:02.335496 6308 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 13:19:02.335502 6308 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 13:19:02.335524 6308 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 13:19:02.335533 6308 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 13:19:02.335524 6308 factory.go:656] Stopping watch factory\\\\nI1128 13:19:02.335545 6308 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 13:19:02.335557 6308 ovnkube.go:599] Stopped ovnkube\\\\nI1128 13:19:02.335525 6308 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 13:19:02.335595 6308 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 13\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:19:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9de00059ffcc54d321d43ddbc0ee9af3991cb33e25efcbef95c79e0b983f3a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:18Z\\\",\\\"message\\\":\\\"icPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.43],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1128 13:19:17.230096 6507 services_controller.go:434] Service openshift-console-operator/metrics retrieved from lister for network=default: \\\\u0026Service{ObjectMeta:{metrics openshift-console-operator e4559ce3-2d5a-470f-b8bf-4c8b054d2335 11843 0 2025-02-23 05:38:55 +0000 UTC \\\\u003cnil\\\\u003e \\\\u003cnil\\\\u003e map[name:console-operator] map[capability.openshift.io/name:Console include.release.openshift.io/hypershift:true include.release.openshift.io/ibm-cloud-managed:true include.release.openshift.io/self-managed-high-availability:true include.release.openshift.io/single-node-developer:true service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:serving-cert 
service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0003197af \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:19:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.466100 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1de596ae-343e-4839-b049-61fb6b8fe7c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d95a5273a98c54e77a7eacddf2dd80d3b844b0e91f4acea5b62956781ed7a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40364e12e5cb85f33d248ca158dabde6c73b2f1b58e4d3c95f454f0bce166c89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7mglc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 28 
13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.479839 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.486791 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.487113 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.487300 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.487468 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.488327 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:28Z","lastTransitionTime":"2025-11-28T13:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.494334 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.510662 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.525185 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.541107 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.560033 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.577998 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:28Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.592308 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.592356 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.592373 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.592393 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.592408 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:28Z","lastTransitionTime":"2025-11-28T13:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.696042 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.696142 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.696162 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.696186 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.696203 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:28Z","lastTransitionTime":"2025-11-28T13:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.798971 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.799020 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.799033 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.799051 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.799067 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:28Z","lastTransitionTime":"2025-11-28T13:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.902249 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.902319 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.902336 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.902360 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:28 crc kubenswrapper[4857]: I1128 13:19:28.902378 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:28Z","lastTransitionTime":"2025-11-28T13:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.005377 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.005440 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.005457 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.005481 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.005498 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:29Z","lastTransitionTime":"2025-11-28T13:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.109995 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.110056 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.110075 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.110131 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.110151 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:29Z","lastTransitionTime":"2025-11-28T13:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.213604 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.213652 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.213664 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.213681 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.213694 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:29Z","lastTransitionTime":"2025-11-28T13:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.309146 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.309191 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.309196 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:29 crc kubenswrapper[4857]: E1128 13:19:29.309310 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.309365 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:29 crc kubenswrapper[4857]: E1128 13:19:29.309545 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:19:29 crc kubenswrapper[4857]: E1128 13:19:29.309620 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:19:29 crc kubenswrapper[4857]: E1128 13:19:29.309843 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.316996 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.317053 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.317070 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.317094 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.317111 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:29Z","lastTransitionTime":"2025-11-28T13:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.419263 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.419313 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.419325 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.419344 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.419358 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:29Z","lastTransitionTime":"2025-11-28T13:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.522172 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.522218 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.522228 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.522243 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.522254 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:29Z","lastTransitionTime":"2025-11-28T13:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.625831 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.625924 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.625944 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.625968 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.625991 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:29Z","lastTransitionTime":"2025-11-28T13:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.728337 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.728374 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.728389 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.728408 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.728422 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:29Z","lastTransitionTime":"2025-11-28T13:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.830676 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.830799 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.830829 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.830861 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.830895 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:29Z","lastTransitionTime":"2025-11-28T13:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.934239 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.934315 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.934339 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.934362 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:29 crc kubenswrapper[4857]: I1128 13:19:29.934382 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:29Z","lastTransitionTime":"2025-11-28T13:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.037514 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.037573 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.037590 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.037613 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.037629 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:30Z","lastTransitionTime":"2025-11-28T13:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.141043 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.141093 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.141107 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.141122 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.141134 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:30Z","lastTransitionTime":"2025-11-28T13:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.244015 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.244076 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.244096 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.244121 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.244140 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:30Z","lastTransitionTime":"2025-11-28T13:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.346219 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.346274 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.346291 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.346308 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.346319 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:30Z","lastTransitionTime":"2025-11-28T13:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.449455 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.449515 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.449524 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.449537 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.449545 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:30Z","lastTransitionTime":"2025-11-28T13:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.552301 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.552348 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.552362 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.552379 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.552390 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:30Z","lastTransitionTime":"2025-11-28T13:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.655897 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.655932 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.655942 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.655956 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.655966 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:30Z","lastTransitionTime":"2025-11-28T13:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.757839 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.757883 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.757895 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.757911 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.757925 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:30Z","lastTransitionTime":"2025-11-28T13:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.861408 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.861682 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.861694 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.861709 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.861721 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:30Z","lastTransitionTime":"2025-11-28T13:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.968415 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.968470 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.968484 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.968506 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:30 crc kubenswrapper[4857]: I1128 13:19:30.968520 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:30Z","lastTransitionTime":"2025-11-28T13:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.071075 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.071118 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.071129 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.071148 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.071159 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:31Z","lastTransitionTime":"2025-11-28T13:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.173644 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.173734 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.173746 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.173777 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.173788 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:31Z","lastTransitionTime":"2025-11-28T13:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.275663 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.275710 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.275722 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.275738 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.275769 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:31Z","lastTransitionTime":"2025-11-28T13:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.309001 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:31 crc kubenswrapper[4857]: E1128 13:19:31.309354 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.309020 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:31 crc kubenswrapper[4857]: E1128 13:19:31.309572 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.309010 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:31 crc kubenswrapper[4857]: E1128 13:19:31.309786 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.309029 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:31 crc kubenswrapper[4857]: E1128 13:19:31.309971 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.378394 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.378441 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.378450 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.378464 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.378474 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:31Z","lastTransitionTime":"2025-11-28T13:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.481936 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.481998 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.482018 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.482043 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.482064 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:31Z","lastTransitionTime":"2025-11-28T13:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.585108 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.585193 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.585207 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.585222 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.585232 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:31Z","lastTransitionTime":"2025-11-28T13:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.688521 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.688580 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.688598 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.688623 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.688642 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:31Z","lastTransitionTime":"2025-11-28T13:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.791030 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.791076 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.791093 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.791116 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.791133 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:31Z","lastTransitionTime":"2025-11-28T13:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.894049 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.894114 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.894128 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.894175 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.894188 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:31Z","lastTransitionTime":"2025-11-28T13:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.997112 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.997159 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.997170 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.997189 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:31 crc kubenswrapper[4857]: I1128 13:19:31.997202 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:31Z","lastTransitionTime":"2025-11-28T13:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.099956 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.099989 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.099999 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.100015 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.100024 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:32Z","lastTransitionTime":"2025-11-28T13:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.203035 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.203073 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.203084 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.203101 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.203115 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:32Z","lastTransitionTime":"2025-11-28T13:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.307569 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.307628 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.307637 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.307650 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.307660 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:32Z","lastTransitionTime":"2025-11-28T13:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.399269 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.399321 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.399337 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.399359 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.399376 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:32Z","lastTransitionTime":"2025-11-28T13:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:32 crc kubenswrapper[4857]: E1128 13:19:32.412941 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:32Z is after 2025-08-24T17:21:41Z"
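Every status-patch attempt in this log dies on the same terminal clause: the serving certificate behind the node.network-node-identity.openshift.io webhook expired on 2025-08-24T17:21:41Z, months before the logged wall-clock time of 2025-11-28T13:19:32Z. What follows is a minimal Go sketch of the validity-window test that crypto/x509 applies during verification; it is illustrative only, not part of the log, and the certificate file path is a hypothetical stand-in.

// certcheck.go: reproduce the x509 validity-window test that makes the
// webhook TLS handshake above fail. The cert path is an assumption for
// illustration; point it at the webhook's serving certificate.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	data, err := os.ReadFile("/path/to/webhook-serving-cert.pem") // hypothetical path
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	now := time.Now().UTC()
	// crypto/x509 rejects a chain when now falls outside [NotBefore, NotAfter],
	// which is exactly the "current time ... is after ..." failure logged above.
	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		fmt.Printf("certificate has expired or is not yet valid: current time %s is not within [%s, %s]\n",
			now.Format(time.RFC3339), cert.NotBefore.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
		os.Exit(1)
	}
	fmt.Println("certificate is within its validity window")
}

Run against the webhook's actual serving certificate, this prints the same expired/not-yet-valid diagnosis that the TLS handshake reports in the error entries.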
event="NodeHasNoDiskPressure" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.419935 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.419956 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.419974 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:32Z","lastTransitionTime":"2025-11-28T13:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:32 crc kubenswrapper[4857]: E1128 13:19:32.433357 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.437741 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.437811 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.437826 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.437844 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.437857 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:32Z","lastTransitionTime":"2025-11-28T13:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:32 crc kubenswrapper[4857]: E1128 13:19:32.453908 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.458507 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.458567 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.458584 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.458610 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.458627 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:32Z","lastTransitionTime":"2025-11-28T13:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:32 crc kubenswrapper[4857]: E1128 13:19:32.473264 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.478010 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.478072 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.478089 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.478114 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.478131 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:32Z","lastTransitionTime":"2025-11-28T13:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:32 crc kubenswrapper[4857]: E1128 13:19:32.492272 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:32 crc kubenswrapper[4857]: E1128 13:19:32.492584 4857 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.494543 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.494623 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.494642 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.494663 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.494680 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:32Z","lastTransitionTime":"2025-11-28T13:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.597650 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.597703 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.597722 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.597743 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.597782 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:32Z","lastTransitionTime":"2025-11-28T13:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.701195 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.701258 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.701282 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.701312 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.701335 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:32Z","lastTransitionTime":"2025-11-28T13:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.803595 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.803722 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.803741 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.803796 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.803814 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:32Z","lastTransitionTime":"2025-11-28T13:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.907103 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.907135 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.907146 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.907162 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:32 crc kubenswrapper[4857]: I1128 13:19:32.907172 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:32Z","lastTransitionTime":"2025-11-28T13:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.009275 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.009319 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.009330 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.009345 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.009357 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:33Z","lastTransitionTime":"2025-11-28T13:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.112264 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.112295 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.112303 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.112315 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.112324 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:33Z","lastTransitionTime":"2025-11-28T13:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.213704 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.213743 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.213770 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.213787 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.213798 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:33Z","lastTransitionTime":"2025-11-28T13:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.309458 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.309481 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.309495 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:33 crc kubenswrapper[4857]: E1128 13:19:33.309566 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.309626 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:33 crc kubenswrapper[4857]: E1128 13:19:33.309918 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:19:33 crc kubenswrapper[4857]: E1128 13:19:33.309959 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:19:33 crc kubenswrapper[4857]: E1128 13:19:33.310020 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.310143 4857 scope.go:117] "RemoveContainer" containerID="ba9de00059ffcc54d321d43ddbc0ee9af3991cb33e25efcbef95c79e0b983f3a" Nov 28 13:19:33 crc kubenswrapper[4857]: E1128 13:19:33.310393 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-w25ss_openshift-ovn-kubernetes(bf74e995-2208-43c6-b89d-10318f55cda8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.316031 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.316058 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.316070 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.316086 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.316099 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:33Z","lastTransitionTime":"2025-11-28T13:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.322930 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e77ee052-7a38-4552-8c30-30bfebe79716\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6c67a288ed242cc97c05f9c8a01591c4ea3c3b8bb11e4e76d38bba7dd17f15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3e7e3b66ec5d45bfcbe5f4de7e21b540ba5bcc9859f3753465db8f992b731d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://953e18c7e26d1dbbd6f09ba86ce60483d35bd6bb271a76998acbc9e2d333a034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65aca5158444f4f1cfa23a526f33afe796d7c390a9d4007ec93a861a8399cb23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65aca5158444f4f1cfa23a526f33afe796d7c390a9d4007ec93a861a8399cb23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.337254 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919
d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.351883 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.361552 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.384228 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b839750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.400098 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.410784 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1de596ae-343e-4839-b049-61fb6b8fe7c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d95a5273a98c54e77a7eacddf2dd80d3b844b0e91f4acea5b62956781ed7a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40364e12e5cb85f33d248ca158dabde6c73b2f1b58e4d3c95f454f0bce166c89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7mglc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:33Z is after 2025-08-24T17:21:41Z" Nov 28 
13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.419310 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.419370 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.419382 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.419400 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.419413 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:33Z","lastTransitionTime":"2025-11-28T13:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.423374 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"rea
dy\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.446512 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9de00059ffcc54d321d43ddbc0ee9af3991cb33e25efcbef95c79e0b983f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9de00059ffcc54d321d43ddbc0ee9af3991cb33e25efcbef95c79e0b983f3a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:18Z\\\",\\\"message\\\":\\\"icPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.43],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1128 13:19:17.230096 6507 services_controller.go:434] Service openshift-console-operator/metrics retrieved from lister for network=default: \\\\u0026Service{ObjectMeta:{metrics openshift-console-operator e4559ce3-2d5a-470f-b8bf-4c8b054d2335 11843 0 2025-02-23 05:38:55 +0000 UTC \\\\u003cnil\\\\u003e \\\\u003cnil\\\\u003e map[name:console-operator] map[capability.openshift.io/name:Console include.release.openshift.io/hypershift:true include.release.openshift.io/ibm-cloud-managed:true include.release.openshift.io/self-managed-high-availability:true include.release.openshift.io/single-node-developer:true service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:serving-cert service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0003197af \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:19:16Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-w25ss_openshift-ovn-kubernetes(bf74e995-2208-43c6-b89d-10318f55cda8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.463352 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed 
to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.482375 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.495188 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.506987 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.517270 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.520931 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.520958 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.520968 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.520980 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.520989 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:33Z","lastTransitionTime":"2025-11-28T13:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.527970 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.540958 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.549974 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jspn8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ab9b94a-66a7-4d68-8046-d6d97595330d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jspn8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.561341 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.624306 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.624359 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.624375 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.624396 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.624411 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:33Z","lastTransitionTime":"2025-11-28T13:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.727225 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.727273 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.727286 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.727302 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.727314 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:33Z","lastTransitionTime":"2025-11-28T13:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.831032 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.831073 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.831082 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.831095 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.831104 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:33Z","lastTransitionTime":"2025-11-28T13:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.933564 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.933598 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.933606 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.933620 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:33 crc kubenswrapper[4857]: I1128 13:19:33.933629 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:33Z","lastTransitionTime":"2025-11-28T13:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:34 crc kubenswrapper[4857]: I1128 13:19:34.036552 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:34 crc kubenswrapper[4857]: I1128 13:19:34.036583 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:34 crc kubenswrapper[4857]: I1128 13:19:34.036593 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:34 crc kubenswrapper[4857]: I1128 13:19:34.036605 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:34 crc kubenswrapper[4857]: I1128 13:19:34.036614 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:34Z","lastTransitionTime":"2025-11-28T13:19:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:34 crc kubenswrapper[4857]: I1128 13:19:34.138836 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:34 crc kubenswrapper[4857]: I1128 13:19:34.138914 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:34 crc kubenswrapper[4857]: I1128 13:19:34.138936 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:34 crc kubenswrapper[4857]: I1128 13:19:34.138963 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:34 crc kubenswrapper[4857]: I1128 13:19:34.138980 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:34Z","lastTransitionTime":"2025-11-28T13:19:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:34 crc kubenswrapper[4857]: I1128 13:19:34.242352 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:34 crc kubenswrapper[4857]: I1128 13:19:34.242406 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:34 crc kubenswrapper[4857]: I1128 13:19:34.242415 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:34 crc kubenswrapper[4857]: I1128 13:19:34.242432 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:34 crc kubenswrapper[4857]: I1128 13:19:34.242443 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:34Z","lastTransitionTime":"2025-11-28T13:19:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.271204 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.271248 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.271272 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.271291 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.271306 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:35Z","lastTransitionTime":"2025-11-28T13:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.308734 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.308778 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.308742 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:35 crc kubenswrapper[4857]: E1128 13:19:35.308882 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:19:35 crc kubenswrapper[4857]: E1128 13:19:35.308955 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:19:35 crc kubenswrapper[4857]: E1128 13:19:35.309028 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.309177 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:35 crc kubenswrapper[4857]: E1128 13:19:35.309342 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.374230 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.374480 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.374718 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.374973 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.375194 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:35Z","lastTransitionTime":"2025-11-28T13:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.478174 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.478210 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.478223 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.478240 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.478253 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:35Z","lastTransitionTime":"2025-11-28T13:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.581351 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.581392 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.581412 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.581434 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.581450 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:35Z","lastTransitionTime":"2025-11-28T13:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.683970 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.684009 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.684019 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.684035 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.684046 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:35Z","lastTransitionTime":"2025-11-28T13:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.710705 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs\") pod \"network-metrics-daemon-jspn8\" (UID: \"9ab9b94a-66a7-4d68-8046-d6d97595330d\") " pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:35 crc kubenswrapper[4857]: E1128 13:19:35.710862 4857 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:19:35 crc kubenswrapper[4857]: E1128 13:19:35.710924 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs podName:9ab9b94a-66a7-4d68-8046-d6d97595330d nodeName:}" failed. No retries permitted until 2025-11-28 13:20:07.710905588 +0000 UTC m=+99.738280755 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs") pod "network-metrics-daemon-jspn8" (UID: "9ab9b94a-66a7-4d68-8046-d6d97595330d") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.786640 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.786686 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.786696 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.786711 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.786721 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:35Z","lastTransitionTime":"2025-11-28T13:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.888846 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.889281 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.889398 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.889566 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:35 crc kubenswrapper[4857]: I1128 13:19:35.889649 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:35Z","lastTransitionTime":"2025-11-28T13:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.773942 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-tzg2g_1031bdc4-d6c6-4425-805b-506069f5667d/kube-multus/0.log" Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.774015 4857 generic.go:334] "Generic (PLEG): container finished" podID="1031bdc4-d6c6-4425-805b-506069f5667d" containerID="88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a" exitCode=1 Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.774056 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-tzg2g" event={"ID":"1031bdc4-d6c6-4425-805b-506069f5667d","Type":"ContainerDied","Data":"88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a"} Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.774548 4857 scope.go:117] "RemoveContainer" containerID="88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a" Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.791452 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1de596ae-343e-4839-b049-61fb6b8fe7c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d95a5273a98c54e77a7eacddf2dd80d3b844b0e91f4acea5b62956781ed7a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40364e12e5cb85f33d248ca158dabde6c73b2f1b58e4d3c95f454f0bce166c89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7mglc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.811212 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.811247 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.811257 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.811273 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.811284 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:36Z","lastTransitionTime":"2025-11-28T13:19:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.816006 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.841422 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9de00059ffcc54d321d43ddbc0ee9af3991cb3
3e25efcbef95c79e0b983f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9de00059ffcc54d321d43ddbc0ee9af3991cb33e25efcbef95c79e0b983f3a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:18Z\\\",\\\"message\\\":\\\"icPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.43],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1128 13:19:17.230096 6507 services_controller.go:434] Service openshift-console-operator/metrics retrieved from lister for network=default: \\\\u0026Service{ObjectMeta:{metrics openshift-console-operator e4559ce3-2d5a-470f-b8bf-4c8b054d2335 11843 0 2025-02-23 05:38:55 +0000 UTC \\\\u003cnil\\\\u003e \\\\u003cnil\\\\u003e map[name:console-operator] map[capability.openshift.io/name:Console include.release.openshift.io/hypershift:true include.release.openshift.io/ibm-cloud-managed:true include.release.openshift.io/self-managed-high-availability:true include.release.openshift.io/single-node-developer:true service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:serving-cert service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0003197af \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:19:16Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-w25ss_openshift-ovn-kubernetes(bf74e995-2208-43c6-b89d-10318f55cda8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.853101 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed 
to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.867298 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.880385 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.897525 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:35Z\\\",\\\"message\\\":\\\"2025-11-28T13:18:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_72286343-065b-4a05-bba3-15806f0e4dc4\\\\n2025-11-28T13:18:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_72286343-065b-4a05-bba3-15806f0e4dc4 to /host/opt/cni/bin/\\\\n2025-11-28T13:18:50Z [verbose] multus-daemon started\\\\n2025-11-28T13:18:50Z [verbose] Readiness Indicator file check\\\\n2025-11-28T13:19:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.912930 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.915893 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.915955 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.915974 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.916004 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.916025 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:36Z","lastTransitionTime":"2025-11-28T13:19:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.925866 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.945839 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.959851 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jspn8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ab9b94a-66a7-4d68-8046-d6d97595330d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jspn8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.973422 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.985790 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e77ee052-7a38-4552-8c30-30bfebe79716\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6c67a288ed242cc97c05f9c8a01591c4ea3c3b8bb11e4e76d38bba7dd17f15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3e7e3b66ec5d45bfcbe5f4de7e21b540ba5bcc9859f3753465db8f992b731d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://953e18c7e26d1dbbd6f09ba86ce60483d35bd6bb271a76998acbc9e2d333a034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65aca5158444f4f1cfa23a526f33afe796d7c390a9d4007ec93a861a8399cb23\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65aca5158444f4f1cfa23a526f33afe796d7c390a9d4007ec93a861a8399cb23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:36 crc kubenswrapper[4857]: I1128 13:19:36.999459 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.012428 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.018606 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.018655 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.018664 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.018677 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.018686 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:37Z","lastTransitionTime":"2025-11-28T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.021171 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.037940 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b83
9750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.052874 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707
d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of 
insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.120724 4857 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.120776 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.120786 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.120800 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.120811 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:37Z","lastTransitionTime":"2025-11-28T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.223307 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.223342 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.223350 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.223363 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.223372 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:37Z","lastTransitionTime":"2025-11-28T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.309544 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.309581 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.309562 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.309578 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:37 crc kubenswrapper[4857]: E1128 13:19:37.309699 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:19:37 crc kubenswrapper[4857]: E1128 13:19:37.309912 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:19:37 crc kubenswrapper[4857]: E1128 13:19:37.309939 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:19:37 crc kubenswrapper[4857]: E1128 13:19:37.310001 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.325855 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.325887 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.325896 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.325926 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.325939 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:37Z","lastTransitionTime":"2025-11-28T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.428131 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.428180 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.428192 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.428210 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.428222 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:37Z","lastTransitionTime":"2025-11-28T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.530705 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.530775 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.530784 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.530798 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.530809 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:37Z","lastTransitionTime":"2025-11-28T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.632876 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.632937 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.632953 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.632975 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.632995 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:37Z","lastTransitionTime":"2025-11-28T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.735320 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.735374 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.735392 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.735417 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.735469 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:37Z","lastTransitionTime":"2025-11-28T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.779157 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-tzg2g_1031bdc4-d6c6-4425-805b-506069f5667d/kube-multus/0.log" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.779249 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-tzg2g" event={"ID":"1031bdc4-d6c6-4425-805b-506069f5667d","Type":"ContainerStarted","Data":"81125daca139d6b77545a7ffee9064cd2fd693de61ae093e889ec72440be4856"} Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.798217 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.813998 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.824606 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jspn8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ab9b94a-66a7-4d68-8046-d6d97595330d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jspn8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.837888 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.837947 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.837967 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.837989 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.838008 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:37Z","lastTransitionTime":"2025-11-28T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.839202 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\
"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.857225 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e77ee052-7a38-4552-8c30-30bfebe79716\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6c67a288ed242cc97c05f9c8a01591c4ea3c3b8bb11e4e76d38bba7dd17f15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3e7e3b66ec5d45bfcbe5f4de7e21b540ba5bcc9859f3753465db8f992b731d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://953e18c7e26d1dbbd6f09ba86ce60483d35bd6bb271a76998acbc9e2d333a034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65aca5158444f4f1cfa23a526f33afe796d7c390a9d4007ec93a861a8399cb23\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65aca5158444f4f1cfa23a526f33afe796d7c390a9d4007ec93a861a8399cb23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.875102 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.886674 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.898007 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.920589 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b839750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.942213 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.942240 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.942252 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.942266 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.942276 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:37Z","lastTransitionTime":"2025-11-28T13:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.946609 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9de00059ffcc54d321d43ddbc0ee9af3991cb33e25efcbef95c79e0b983f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9de00059ffcc54d321d43ddbc0ee9af3991cb33e25efcbef95c79e0b983f3a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:18Z\\\",\\\"message\\\":\\\"icPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.43],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1128 13:19:17.230096 6507 services_controller.go:434] Service openshift-console-operator/metrics retrieved from lister for network=default: \\\\u0026Service{ObjectMeta:{metrics openshift-console-operator e4559ce3-2d5a-470f-b8bf-4c8b054d2335 11843 0 2025-02-23 05:38:55 +0000 UTC \\\\u003cnil\\\\u003e \\\\u003cnil\\\\u003e map[name:console-operator] map[capability.openshift.io/name:Console include.release.openshift.io/hypershift:true include.release.openshift.io/ibm-cloud-managed:true include.release.openshift.io/self-managed-high-availability:true include.release.openshift.io/single-node-developer:true service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:serving-cert service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0003197af \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:19:16Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-w25ss_openshift-ovn-kubernetes(bf74e995-2208-43c6-b89d-10318f55cda8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.961252 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1de596ae-343e-4839-b049-61fb6b8fe7c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d95a5273a98c54e77a7eacddf2dd80d3b844b0e91f4acea5b62956781ed7a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40364e12e5cb85f33d248ca158dabde6c73b2f1b58e4d3c95f454f0bce166c89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7mglc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.977339 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:37 crc kubenswrapper[4857]: I1128 13:19:37.993999 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.004790 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.015305 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.025652 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.041059 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81125daca139d6b77545a7ffee9064cd2fd693de61ae093e889ec72440be4856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:35Z\\\",\\\"message\\\":\\\"2025-11-28T13:18:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_72286343-065b-4a05-bba3-15806f0e4dc4\\\\n2025-11-28T13:18:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_72286343-065b-4a05-bba3-15806f0e4dc4 to /host/opt/cni/bin/\\\\n2025-11-28T13:18:50Z [verbose] multus-daemon started\\\\n2025-11-28T13:18:50Z [verbose] Readiness Indicator file check\\\\n2025-11-28T13:19:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:38Z is after 2025-08-24T17:21:41Z"
Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.045362 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.045401 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.045414 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.045429 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.045439 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:38Z","lastTransitionTime":"2025-11-28T13:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.058463 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:38Z is after 2025-08-24T17:21:41Z"
Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.147419 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.147688 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.147827 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.147918 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.148008 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:38Z","lastTransitionTime":"2025-11-28T13:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.250524 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.250951 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.251124 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.251300 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.251472 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:38Z","lastTransitionTime":"2025-11-28T13:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.323318 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.342330 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"
,\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"c
ontainerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.351586 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jspn8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ab9b94a-66a7-4d68-8046-d6d97595330d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jspn8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.354479 4857 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.354513 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.354525 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.354542 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.354554 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:38Z","lastTransitionTime":"2025-11-28T13:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.367515 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.377671 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.397294 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b839750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.415178 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.426176 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e77ee052-7a38-4552-8c30-30bfebe79716\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6c67a288ed242cc97c05f9c8a01591c4ea3c3b8bb11e4e76d38bba7dd17f15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3e7e3b66ec5d45bfcbe5f4de7e21b540ba5bcc9859f3753465db8f992b731d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://953e18c7e26d1dbbd6f09ba86ce60483d35bd6bb271a76998acbc9e2d333a034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65aca5158444f4f1cfa23a526f33afe796d7c390a9d4007ec93a861a8399cb23\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65aca5158444f4f1cfa23a526f33afe796d7c390a9d4007ec93a861a8399cb23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.441835 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.457198 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.457431 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.457501 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.457563 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.457858 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:38Z","lastTransitionTime":"2025-11-28T13:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.458209 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.475305 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9de00059ffcc54d321d43ddbc0ee9af3991cb3
3e25efcbef95c79e0b983f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9de00059ffcc54d321d43ddbc0ee9af3991cb33e25efcbef95c79e0b983f3a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:18Z\\\",\\\"message\\\":\\\"icPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.43],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1128 13:19:17.230096 6507 services_controller.go:434] Service openshift-console-operator/metrics retrieved from lister for network=default: \\\\u0026Service{ObjectMeta:{metrics openshift-console-operator e4559ce3-2d5a-470f-b8bf-4c8b054d2335 11843 0 2025-02-23 05:38:55 +0000 UTC \\\\u003cnil\\\\u003e \\\\u003cnil\\\\u003e map[name:console-operator] map[capability.openshift.io/name:Console include.release.openshift.io/hypershift:true include.release.openshift.io/ibm-cloud-managed:true include.release.openshift.io/self-managed-high-availability:true include.release.openshift.io/single-node-developer:true service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:serving-cert service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0003197af \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:19:16Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-w25ss_openshift-ovn-kubernetes(bf74e995-2208-43c6-b89d-10318f55cda8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.485141 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1de596ae-343e-4839-b049-61fb6b8fe7c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d95a5273a98c54e77a7eacddf2dd80d3b844b0e91f4acea5b62956781ed7a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40364e12e5cb85f33d248ca158dabde6c73b2f1b58e4d3c95f454f0bce166c89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7mglc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.497326 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.510583 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81125daca139d6b77545a7ffee9064cd2fd693de61ae093e889ec72440be4856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:35Z\\\",\\\"message\\\":\\\"2025-11-28T13:18:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_72286343-065b-4a05-bba3-15806f0e4dc4\\\\n2025-11-28T13:18:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_72286343-065b-4a05-bba3-15806f0e4dc4 to /host/opt/cni/bin/\\\\n2025-11-28T13:18:50Z [verbose] multus-daemon started\\\\n2025-11-28T13:18:50Z [verbose] Readiness Indicator file check\\\\n2025-11-28T13:19:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.522605 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.534553 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.546980 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.556160 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.561054 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.561084 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.561095 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.561111 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:38 crc kubenswrapper[4857]: I1128 13:19:38.561122 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:38Z","lastTransitionTime":"2025-11-28T13:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} [duplicate log entries elided: the identical five-entry block — kubelet_node_status.go:724 "Recording event message for node" (NodeHasSufficientMemory, NodeHasNoDiskPressure, NodeHasSufficientPID, NodeNotReady) followed by setters.go:603 "Node became not ready" (KubeletNotReady, no CNI configuration file in /etc/kubernetes/cni/net.d/) — repeats at 13:19:38.662905, 13:19:38.766301, 13:19:38.869072 and 13:19:38.972229] Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.074821 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.074887 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.074923 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.074967 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.074993 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:39Z","lastTransitionTime":"2025-11-28T13:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.177538 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.177576 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.177585 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.177597 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.177606 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:39Z","lastTransitionTime":"2025-11-28T13:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.279888 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.280188 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.280323 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.280463 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.280563 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:39Z","lastTransitionTime":"2025-11-28T13:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.309524 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.309691 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.309597 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.309534 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:39 crc kubenswrapper[4857]: E1128 13:19:39.309975 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:19:39 crc kubenswrapper[4857]: E1128 13:19:39.310116 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:19:39 crc kubenswrapper[4857]: E1128 13:19:39.310251 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:39 crc kubenswrapper[4857]: E1128 13:19:39.310498 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.382697 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.382743 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.382784 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.382807 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:39 crc kubenswrapper[4857]: I1128 13:19:39.382823 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:39Z","lastTransitionTime":"2025-11-28T13:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} [duplicate log entries elided: the same five-entry "Recording event message for node" / "Node became not ready" (KubeletNotReady, no CNI configuration file in /etc/kubernetes/cni/net.d/) block repeats at 13:19:39.485591, 13:19:39.588649, 13:19:39.691141, 13:19:39.794063, 13:19:39.896253, 13:19:39.999357, 13:19:40.102153, 13:19:40.204273, 13:19:40.306648, 13:19:40.409716 and 13:19:40.512810] Nov 28 13:19:40 crc kubenswrapper[4857]: I1128 13:19:40.616005 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:40 crc kubenswrapper[4857]: I1128 13:19:40.616080 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:40 crc kubenswrapper[4857]: I1128 13:19:40.616102 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:40 crc kubenswrapper[4857]: I1128 13:19:40.616134 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:40 crc kubenswrapper[4857]: I1128 13:19:40.616153 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:40Z","lastTransitionTime":"2025-11-28T13:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:40 crc kubenswrapper[4857]: I1128 13:19:40.718743 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:40 crc kubenswrapper[4857]: I1128 13:19:40.718813 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:40 crc kubenswrapper[4857]: I1128 13:19:40.718829 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:40 crc kubenswrapper[4857]: I1128 13:19:40.718850 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:40 crc kubenswrapper[4857]: I1128 13:19:40.718866 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:40Z","lastTransitionTime":"2025-11-28T13:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:40 crc kubenswrapper[4857]: I1128 13:19:40.821331 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:40 crc kubenswrapper[4857]: I1128 13:19:40.821429 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:40 crc kubenswrapper[4857]: I1128 13:19:40.821446 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:40 crc kubenswrapper[4857]: I1128 13:19:40.821469 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:40 crc kubenswrapper[4857]: I1128 13:19:40.821487 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:40Z","lastTransitionTime":"2025-11-28T13:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:40 crc kubenswrapper[4857]: I1128 13:19:40.924536 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:40 crc kubenswrapper[4857]: I1128 13:19:40.924605 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:40 crc kubenswrapper[4857]: I1128 13:19:40.924624 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:40 crc kubenswrapper[4857]: I1128 13:19:40.924647 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:40 crc kubenswrapper[4857]: I1128 13:19:40.924669 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:40Z","lastTransitionTime":"2025-11-28T13:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.027181 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.027834 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.027865 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.027890 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.027909 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:41Z","lastTransitionTime":"2025-11-28T13:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.130389 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.130421 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.130430 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.130442 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.130451 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:41Z","lastTransitionTime":"2025-11-28T13:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.232256 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.232301 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.232310 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.232329 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.232341 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:41Z","lastTransitionTime":"2025-11-28T13:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.309405 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.309540 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:41 crc kubenswrapper[4857]: E1128 13:19:41.309555 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.309569 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.309597 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:41 crc kubenswrapper[4857]: E1128 13:19:41.309839 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:19:41 crc kubenswrapper[4857]: E1128 13:19:41.310080 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:41 crc kubenswrapper[4857]: E1128 13:19:41.310215 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.342606 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.342673 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.342693 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.342727 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.342745 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:41Z","lastTransitionTime":"2025-11-28T13:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.445666 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.445727 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.445827 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.445873 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.445895 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:41Z","lastTransitionTime":"2025-11-28T13:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.549885 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.549932 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.549943 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.549960 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.549972 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:41Z","lastTransitionTime":"2025-11-28T13:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.655661 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.655723 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.655744 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.655810 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.655834 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:41Z","lastTransitionTime":"2025-11-28T13:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.759510 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.759578 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.759591 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.759615 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.759630 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:41Z","lastTransitionTime":"2025-11-28T13:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.862578 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.862659 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.862684 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.862714 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.862735 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:41Z","lastTransitionTime":"2025-11-28T13:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.965622 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.965687 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.965705 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.965730 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:41 crc kubenswrapper[4857]: I1128 13:19:41.965747 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:41Z","lastTransitionTime":"2025-11-28T13:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.069194 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.069277 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.069304 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.069475 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.069516 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:42Z","lastTransitionTime":"2025-11-28T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.172635 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.172704 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.172724 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.172795 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.172817 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:42Z","lastTransitionTime":"2025-11-28T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.275446 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.275489 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.275503 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.275522 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.275538 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:42Z","lastTransitionTime":"2025-11-28T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.378284 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.378538 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.378605 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.378670 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.378740 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:42Z","lastTransitionTime":"2025-11-28T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.481111 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.481187 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.481206 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.481231 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.481249 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:42Z","lastTransitionTime":"2025-11-28T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.584781 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.584839 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.584855 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.584875 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.584888 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:42Z","lastTransitionTime":"2025-11-28T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.687890 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.687933 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.687947 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.687963 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.687976 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:42Z","lastTransitionTime":"2025-11-28T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.792839 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.792897 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.792913 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.792934 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.792949 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:42Z","lastTransitionTime":"2025-11-28T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.864478 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.864512 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.864522 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.864565 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.864574 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:42Z","lastTransitionTime":"2025-11-28T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:42 crc kubenswrapper[4857]: E1128 13:19:42.880723 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.884407 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.884449 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.884464 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.884481 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.884493 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:42Z","lastTransitionTime":"2025-11-28T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:42 crc kubenswrapper[4857]: E1128 13:19:42.905048 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.910199 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.910264 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.910274 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.910297 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.910310 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:42Z","lastTransitionTime":"2025-11-28T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:42 crc kubenswrapper[4857]: E1128 13:19:42.921899 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.925917 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.925956 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.925966 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.925983 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.925995 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:42Z","lastTransitionTime":"2025-11-28T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:42 crc kubenswrapper[4857]: E1128 13:19:42.940007 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.944505 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.944687 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.944813 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.944912 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.945005 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:42Z","lastTransitionTime":"2025-11-28T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:42 crc kubenswrapper[4857]: E1128 13:19:42.964055 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:42 crc kubenswrapper[4857]: E1128 13:19:42.964441 4857 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.966236 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.966348 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.966446 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.966556 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:42 crc kubenswrapper[4857]: I1128 13:19:42.966657 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:42Z","lastTransitionTime":"2025-11-28T13:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.069527 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.069569 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.069578 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.069593 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.069604 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:43Z","lastTransitionTime":"2025-11-28T13:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.176104 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.176163 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.176181 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.176204 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.176222 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:43Z","lastTransitionTime":"2025-11-28T13:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.278995 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.279062 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.279072 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.279092 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.279104 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:43Z","lastTransitionTime":"2025-11-28T13:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.309108 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.309138 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.309176 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.309217 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:43 crc kubenswrapper[4857]: E1128 13:19:43.310279 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:19:43 crc kubenswrapper[4857]: E1128 13:19:43.310418 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:43 crc kubenswrapper[4857]: E1128 13:19:43.310643 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:19:43 crc kubenswrapper[4857]: E1128 13:19:43.310795 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.323409 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.381580 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.381618 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.381626 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.381640 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.381649 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:43Z","lastTransitionTime":"2025-11-28T13:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.484519 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.484577 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.484594 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.484619 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.484651 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:43Z","lastTransitionTime":"2025-11-28T13:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.587102 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.587419 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.587565 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.587656 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:43 crc kubenswrapper[4857]: I1128 13:19:43.587778 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:43Z","lastTransitionTime":"2025-11-28T13:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
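The setters.go:603 entries above embed the node's Ready condition as inline JSON. As a minimal sketch of that payload's shape (field names are taken from the logged JSON; the struct below is a simplified stand-in I introduce for illustration, not the real k8s.io/api/core/v1.NodeCondition type), the following Go program reproduces an equivalent condition object:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// nodeCondition mirrors the fields seen in the logged
// condition={"type":"Ready","status":"False",...} payload.
// It is a hypothetical simplification of the upstream NodeCondition type.
type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	now := time.Now().UTC().Format(time.RFC3339)
	c := nodeCondition{
		Type:               "Ready",
		Status:             "False",
		LastHeartbeatTime:  now,
		LastTransitionTime: now,
		Reason:             "KubeletNotReady",
		Message:            "container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady",
	}
	b, err := json.Marshal(c)
	if err != nil {
		panic(err)
	}
	// Prints JSON in the same shape the kubelet logs above.
	fmt.Println(string(b))
}

As long as the Ready condition carries status "False", the kubelet keeps re-recording the same NodeNotReady event on each sync, which is why the identical five-entry cycle recurs at roughly 100 ms intervals below.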
Nov 28 13:19:45 crc kubenswrapper[4857]: I1128 13:19:45.240745 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:45 crc kubenswrapper[4857]: I1128 13:19:45.240822 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:45 crc kubenswrapper[4857]: I1128 13:19:45.240836 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:45 crc kubenswrapper[4857]: I1128 13:19:45.240852 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:45 crc kubenswrapper[4857]: I1128 13:19:45.240864 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:45Z","lastTransitionTime":"2025-11-28T13:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:45 crc kubenswrapper[4857]: I1128 13:19:45.308543 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:19:45 crc kubenswrapper[4857]: E1128 13:19:45.308666 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 13:19:45 crc kubenswrapper[4857]: I1128 13:19:45.308881 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 13:19:45 crc kubenswrapper[4857]: I1128 13:19:45.308915 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8"
Nov 28 13:19:45 crc kubenswrapper[4857]: E1128 13:19:45.308941 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 13:19:45 crc kubenswrapper[4857]: E1128 13:19:45.309045 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d"
Nov 28 13:19:45 crc kubenswrapper[4857]: I1128 13:19:45.309072 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:19:45 crc kubenswrapper[4857]: E1128 13:19:45.309132 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 13:19:45 crc kubenswrapper[4857]: I1128 13:19:45.310098 4857 scope.go:117] "RemoveContainer" containerID="ba9de00059ffcc54d321d43ddbc0ee9af3991cb33e25efcbef95c79e0b983f3a"
Nov 28 13:19:45 crc kubenswrapper[4857]: I1128 13:19:45.342854 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:45 crc kubenswrapper[4857]: I1128 13:19:45.342918 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:45 crc kubenswrapper[4857]: I1128 13:19:45.342932 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:45 crc kubenswrapper[4857]: I1128 13:19:45.342948 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:45 crc kubenswrapper[4857]: I1128 13:19:45.342961 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:45Z","lastTransitionTime":"2025-11-28T13:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
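Every sandbox and pod-sync failure above traces back to the same root cause the node condition reports: NetworkReady=false because no CNI configuration file exists in /etc/kubernetes/cni/net.d/. A minimal sketch of that presence check, assuming the conventional rule that at least one *.conf, *.conflist, or *.json file must be present in the CNI conf dir (the directory path and extensions mirror the logged message; this is an illustration, not the actual kubelet/CRI-O implementation):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// hasCNIConfig reports whether dir contains at least one file with a
// CNI-config extension, approximating the readiness check behind
// "no CNI configuration file in /etc/kubernetes/cni/net.d/."
func hasCNIConfig(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := hasCNIConfig("/etc/kubernetes/cni/net.d")
	if err != nil {
		fmt.Println("cannot read CNI conf dir:", err)
		return
	}
	if !ok {
		fmt.Println("container runtime network not ready: NetworkReady=false (no CNI config)")
	}
}

On this node the config is expected to appear once the OVN-Kubernetes pods (ovnkube-node-w25ss below) finish starting, at which point the Ready condition flips and the repeated NodeNotReady cycle stops.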
Nov 28 13:19:45 crc kubenswrapper[4857]: I1128 13:19:45.445220 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:45 crc kubenswrapper[4857]: I1128 13:19:45.445260 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:45 crc kubenswrapper[4857]: I1128 13:19:45.445269 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:45 crc kubenswrapper[4857]: I1128 13:19:45.445283 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:45 crc kubenswrapper[4857]: I1128 13:19:45.445293 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:45Z","lastTransitionTime":"2025-11-28T13:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.682397 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.682459 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.682475 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.682497 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.682511 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:46Z","lastTransitionTime":"2025-11-28T13:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.785355 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.785400 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.785412 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.785430 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.785444 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:46Z","lastTransitionTime":"2025-11-28T13:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.810067 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w25ss_bf74e995-2208-43c6-b89d-10318f55cda8/ovnkube-controller/2.log" Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.812671 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerStarted","Data":"4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f"} Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.813258 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.829171 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\
",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.840670 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.852054 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.871985 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b839750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.887446 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.888428 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.888470 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.888479 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.888495 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.888511 4857 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:46Z","lastTransitionTime":"2025-11-28T13:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.902415 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e77ee052-7a38-4552-8c30-30bfebe79716\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6c67a288ed242cc97c05f9c8a01591c4ea3c3b8bb11e4e76d38bba7dd17f15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3e7e3b66ec5d45bfcbe5f4de7e21b540ba5bcc9859f3753465db8f992b731d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://953e18c7e26d1dbbd6f09ba86ce60483d35bd6bb271a76998acbc9e2d333a034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65aca5158444f4f1cfa23a526f33afe796d7c390a9d4007ec93a861a8399cb23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65aca5158444f4f1cfa23a526f33afe796d7c390a9d4007ec93a861a8399cb23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.915375 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.934545 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164
768855b47ee7371f1d4a231f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9de00059ffcc54d321d43ddbc0ee9af3991cb33e25efcbef95c79e0b983f3a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:18Z\\\",\\\"message\\\":\\\"icPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.43],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1128 13:19:17.230096 6507 services_controller.go:434] Service openshift-console-operator/metrics retrieved from lister for network=default: \\\\u0026Service{ObjectMeta:{metrics openshift-console-operator e4559ce3-2d5a-470f-b8bf-4c8b054d2335 11843 0 2025-02-23 05:38:55 +0000 UTC \\\\u003cnil\\\\u003e \\\\u003cnil\\\\u003e map[name:console-operator] map[capability.openshift.io/name:Console include.release.openshift.io/hypershift:true include.release.openshift.io/ibm-cloud-managed:true include.release.openshift.io/self-managed-high-availability:true include.release.openshift.io/single-node-developer:true service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:serving-cert service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0003197af \\\\u003cnil\\\\u003e}] [] 
[]},Spec:ServiceSpec{Ports:[]ServicePort\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:19:16Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\
\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.944374 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1de596ae-343e-4839-b049-61fb6b8fe7c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d95a5273a98c54e77a7eacddf2dd80d3b844b0e91f4acea5b62956781ed7a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40364e12e5cb85f33d248ca158dabde6c73b2f1b58e4d3c95f454f0bce166c89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7mglc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:46Z is after 2025-08-24T17:21:41Z" Nov 28 
13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.953453 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.964220 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.975422 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81125daca139d6b77545a7ffee9064cd2fd693de61ae093e889ec72440be4856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:35Z\\\",\\\"message\\\":\\\"2025-11-28T13:18:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_72286343-065b-4a05-bba3-15806f0e4dc4\\\\n2025-11-28T13:18:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_72286343-065b-4a05-bba3-15806f0e4dc4 to /host/opt/cni/bin/\\\\n2025-11-28T13:18:50Z [verbose] multus-daemon started\\\\n2025-11-28T13:18:50Z [verbose] Readiness Indicator file check\\\\n2025-11-28T13:19:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.990831 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.990859 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.990870 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.990890 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.990904 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:46Z","lastTransitionTime":"2025-11-28T13:19:46Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:46 crc kubenswrapper[4857]: I1128 13:19:46.992881 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.011722 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.025190 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.036989 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jspn8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ab9b94a-66a7-4d68-8046-d6d97595330d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jspn8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.048738 4857 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2189768-374b-4182-b7d6-855295893cd9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://512c4d00871836ce981f36aec4bd31095bd0d35afbac52016837aa0aad7d337b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5335e99c6c1f70658778ab57280d7b8cb2ab151b9f523bd1cc42354ef53f76ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5335e99c6c1f70658778ab57280d7b8cb2ab151b9f523bd1cc42354ef53f76ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.061165 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed 
to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.079173 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.093344 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.093401 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:47 crc 
kubenswrapper[4857]: I1128 13:19:47.093420 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.093443 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.093460 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:47Z","lastTransitionTime":"2025-11-28T13:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.198051 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.198102 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.198111 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.198135 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.198147 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:47Z","lastTransitionTime":"2025-11-28T13:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.301202 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.301279 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.301300 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.301335 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.301356 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:47Z","lastTransitionTime":"2025-11-28T13:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.309376 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.309451 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.309476 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:47 crc kubenswrapper[4857]: E1128 13:19:47.309528 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.309477 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:47 crc kubenswrapper[4857]: E1128 13:19:47.309707 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:47 crc kubenswrapper[4857]: E1128 13:19:47.309738 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:19:47 crc kubenswrapper[4857]: E1128 13:19:47.309807 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.405311 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.405363 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.405376 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.405394 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.405407 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:47Z","lastTransitionTime":"2025-11-28T13:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.509069 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.509131 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.509149 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.509174 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.509192 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:47Z","lastTransitionTime":"2025-11-28T13:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.613143 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.613244 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.613272 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.613310 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.613333 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:47Z","lastTransitionTime":"2025-11-28T13:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.716620 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.716678 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.716695 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.716720 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.716739 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:47Z","lastTransitionTime":"2025-11-28T13:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.818634 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.818689 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.818707 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.818733 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.818782 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:47Z","lastTransitionTime":"2025-11-28T13:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.819903 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w25ss_bf74e995-2208-43c6-b89d-10318f55cda8/ovnkube-controller/3.log" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.820840 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w25ss_bf74e995-2208-43c6-b89d-10318f55cda8/ovnkube-controller/2.log" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.824273 4857 generic.go:334] "Generic (PLEG): container finished" podID="bf74e995-2208-43c6-b89d-10318f55cda8" containerID="4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f" exitCode=1 Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.824325 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerDied","Data":"4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f"} Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.824379 4857 scope.go:117] "RemoveContainer" containerID="ba9de00059ffcc54d321d43ddbc0ee9af3991cb33e25efcbef95c79e0b983f3a" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.825256 4857 scope.go:117] "RemoveContainer" containerID="4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f" Nov 28 13:19:47 crc kubenswrapper[4857]: E1128 13:19:47.825578 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-w25ss_openshift-ovn-kubernetes(bf74e995-2208-43c6-b89d-10318f55cda8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.840694 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.854108 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.868868 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.886999 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81125daca139d6b77545a7ffee9064cd2fd693de61ae093e889ec72440be4856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:35Z\\\",\\\"message\\\":\\\"2025-11-28T13:18:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_72286343-065b-4a05-bba3-15806f0e4dc4\\\\n2025-11-28T13:18:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_72286343-065b-4a05-bba3-15806f0e4dc4 to /host/opt/cni/bin/\\\\n2025-11-28T13:18:50Z [verbose] multus-daemon started\\\\n2025-11-28T13:18:50Z [verbose] Readiness Indicator file check\\\\n2025-11-28T13:19:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.902375 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.920025 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.922282 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.922336 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.922348 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.922368 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.922381 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:47Z","lastTransitionTime":"2025-11-28T13:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.940822 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.952142 4857 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-jspn8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ab9b94a-66a7-4d68-8046-d6d97595330d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jspn8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.963566 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2189768-374b-4182-b7d6-855295893cd9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://512c4d00871836ce981f36aec4bd31095bd0d35afbac52016837aa0aad7d337b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5335e99c6c1f70658778ab57280d7b8cb2ab151b9f523bd1cc42354ef53f76ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5335e99c6c1f70658778ab57280d7b8cb2ab151b9f523bd1cc42354ef53f76ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.976172 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:47 crc kubenswrapper[4857]: I1128 13:19:47.988914 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e77ee052-7a38-4552-8c30-30bfebe79716\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6c67a288ed242cc97c05f9c8a01591c4ea3c3b8bb11e4e76d38bba7dd17f15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3e7e3b66ec5d45bfcbe5f4de7e21b540ba5bcc9859f3753465db8f992b731d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://953e18c7e26d1dbbd6f09ba86ce60483d35bd6bb271a76998acbc9e2d333a034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65aca5158444f4f1cfa23a526f33afe796d7c390a9d4007ec93a861a8399cb23\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65aca5158444f4f1cfa23a526f33afe796d7c390a9d4007ec93a861a8399cb23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.008236 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.024761 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.024802 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.024812 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.024830 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.024840 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:48Z","lastTransitionTime":"2025-11-28T13:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.025110 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.037621 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.061866 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b83
9750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.077088 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707
d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of 
insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.090173 4857 status_manager.go:875] "Failed 
to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1de596ae-343e-4839-b049-61fb6b8fe7c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d95a5273a98c54e77a7eacddf2dd80d3b844b0e91f4acea5b62956781ed7a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40364e12e5cb85f33d248ca158dabde6c73b2f1b58e4d3c95f454f0bce166c89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7mglc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.106360 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.128385 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.128447 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.128460 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.128482 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.128498 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:48Z","lastTransitionTime":"2025-11-28T13:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.130737 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9de00059ffcc54d321d43ddbc0ee9af3991cb33e25efcbef95c79e0b983f3a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:18Z\\\",\\\"message\\\":\\\"icPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.43],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1128 13:19:17.230096 6507 services_controller.go:434] Service openshift-console-operator/metrics retrieved from lister for network=default: \\\\u0026Service{ObjectMeta:{metrics openshift-console-operator e4559ce3-2d5a-470f-b8bf-4c8b054d2335 11843 0 2025-02-23 05:38:55 +0000 UTC \\\\u003cnil\\\\u003e \\\\u003cnil\\\\u003e map[name:console-operator] map[capability.openshift.io/name:Console include.release.openshift.io/hypershift:true include.release.openshift.io/ibm-cloud-managed:true include.release.openshift.io/self-managed-high-availability:true include.release.openshift.io/single-node-developer:true service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:serving-cert service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0003197af \\\\u003cnil\\\\u003e}] [] 
[]},Spec:ServiceSpec{Ports:[]ServicePort\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:19:16Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:47Z\\\",\\\"message\\\":\\\"andler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 13:19:46.747865 6865 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1128 13:19:46.747867 6865 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 13:19:46.747931 6865 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 13:19:46.747986 6865 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1128 13:19:46.747999 6865 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1128 13:19:46.748037 6865 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 13:19:46.748053 6865 factory.go:656] Stopping watch factory\\\\nI1128 13:19:46.748058 6865 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 13:19:46.748058 6865 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 13:19:46.748071 6865 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 13:19:46.748087 6865 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 13:19:46.748336 6865 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1128 13:19:46.748461 6865 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1128 13:19:46.748525 6865 ovnkube.go:599] Stopped ovnkube\\\\nI1128 13:19:46.748565 6865 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 13:19:46.748669 6865 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd4
7ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.232435 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.232517 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.232541 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.232568 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.232588 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:48Z","lastTransitionTime":"2025-11-28T13:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.335492 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.335552 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.335572 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.335598 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.335620 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:48Z","lastTransitionTime":"2025-11-28T13:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.342489 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164
768855b47ee7371f1d4a231f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9de00059ffcc54d321d43ddbc0ee9af3991cb33e25efcbef95c79e0b983f3a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:18Z\\\",\\\"message\\\":\\\"icPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.43],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1128 13:19:17.230096 6507 services_controller.go:434] Service openshift-console-operator/metrics retrieved from lister for network=default: \\\\u0026Service{ObjectMeta:{metrics openshift-console-operator e4559ce3-2d5a-470f-b8bf-4c8b054d2335 11843 0 2025-02-23 05:38:55 +0000 UTC \\\\u003cnil\\\\u003e \\\\u003cnil\\\\u003e map[name:console-operator] map[capability.openshift.io/name:Console include.release.openshift.io/hypershift:true include.release.openshift.io/ibm-cloud-managed:true include.release.openshift.io/self-managed-high-availability:true include.release.openshift.io/single-node-developer:true service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:serving-cert service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0003197af \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:19:16Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:47Z\\\",\\\"message\\\":\\\"andler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 13:19:46.747865 6865 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1128 13:19:46.747867 6865 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 13:19:46.747931 6865 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 13:19:46.747986 6865 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1128 13:19:46.747999 6865 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1128 13:19:46.748037 6865 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 13:19:46.748053 6865 factory.go:656] Stopping watch factory\\\\nI1128 13:19:46.748058 6865 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 13:19:46.748058 6865 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 13:19:46.748071 6865 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 13:19:46.748087 6865 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 13:19:46.748336 6865 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1128 13:19:46.748461 6865 controller.go:132] Adding controller ef_node_controller event 
handlers\\\\nI1128 13:19:46.748525 6865 ovnkube.go:599] Stopped ovnkube\\\\nI1128 13:19:46.748565 6865 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 13:19:46.748669 6865 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}
],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.357357 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1de596ae-343e-4839-b049-61fb6b8fe7c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d95a5273a98c54e77a7eacddf2dd80d3b844b0e91f4acea5b62956781ed7a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40364e12e5cb85f33d248ca158dabde6c73b2f1b58e4d3c95f454f0bce166c89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7mglc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 
13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.374663 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.396396 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.415031 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.429911 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.479618 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.479648 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.479656 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.479669 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.479679 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:48Z","lastTransitionTime":"2025-11-28T13:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.490262 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.509764 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81125daca139d6b77545a7ffee9064cd2fd693de61ae093e889ec72440be4856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:35Z\\\",\\\"message\\\":\\\"2025-11-28T13:18:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_72286343-065b-4a05-bba3-15806f0e4dc4\\\\n2025-11-28T13:18:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_72286343-065b-4a05-bba3-15806f0e4dc4 to /host/opt/cni/bin/\\\\n2025-11-28T13:18:50Z [verbose] multus-daemon started\\\\n2025-11-28T13:18:50Z [verbose] Readiness Indicator file check\\\\n2025-11-28T13:19:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.525022 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.540416 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.561808 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnp
h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\"
:\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.576871 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jspn8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ab9b94a-66a7-4d68-8046-d6d97595330d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jspn8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.582160 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.582222 
4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.582240 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.582264 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.582283 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:48Z","lastTransitionTime":"2025-11-28T13:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.590528 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2189768-374b-4182-b7d6-855295893cd9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://512c4d00871836ce981f36aec4bd31095bd0d35afbac52016837aa0aad7d337b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5335e99c6c1f70658778ab57280d7b8cb2ab151b9f523bd1cc42354ef53f76ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5335e99c6c1f70658778ab57280d7b8cb2ab151b9f
523bd1cc42354ef53f76ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.608599 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"r
esource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 
13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.630582 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e77ee052-7a38-4552-8c30-30bfebe79716\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6c67a288ed242cc97c05f9c8a01591c4ea3c3b8bb11e4e76d38bba7dd17f15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3e7e3b66ec5d45bfcbe5f4de7e21b540ba5bcc9859f3753465db8f992b731d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://953e18c7e26d1dbbd6f09ba86ce60483d35bd6bb271a76998acbc9e2d333a034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65aca5158444f4f1cfa23a526f33afe796d7c390a9d4007ec93a861a8399cb23\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65aca5158444f4f1cfa23a526f33afe796d7c390a9d4007ec93a861a8399cb23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.653642 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.673906 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.684633 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.684695 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.684713 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.684735 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.684792 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:48Z","lastTransitionTime":"2025-11-28T13:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.687326 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.725399 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b83
9750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.787113 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.787149 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.787159 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.787173 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.787181 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:48Z","lastTransitionTime":"2025-11-28T13:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.829048 4857 scope.go:117] "RemoveContainer" containerID="4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f" Nov 28 13:19:48 crc kubenswrapper[4857]: E1128 13:19:48.829484 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-w25ss_openshift-ovn-kubernetes(bf74e995-2208-43c6-b89d-10318f55cda8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.850404 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e77ee052-7a38-4552-8c30-30bfebe79716\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6c67a288ed242cc97c05f9c8a01591c4ea3c3b8bb11e4e76d38bba7dd17f15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3e7e3b66ec5d45bfcbe5f4de7e21b540ba5bcc9859f3753465db8f992b731d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://953e18c7e26d1dbbd6f09ba86ce60483d35bd6bb271a76998acbc9e2d333a034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b
881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65aca5158444f4f1cfa23a526f33afe796d7c390a9d4007ec93a861a8399cb23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65aca5158444f4f1cfa23a526f33afe796d7c390a9d4007ec93a861a8399cb23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.870087 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.887171 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.889457 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.889503 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.889525 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.889550 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.889568 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:48Z","lastTransitionTime":"2025-11-28T13:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.899732 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.923537 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b83
9750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.938772 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707
d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of 
insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.953505 4857 status_manager.go:875] "Failed 
to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1de596ae-343e-4839-b049-61fb6b8fe7c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d95a5273a98c54e77a7eacddf2dd80d3b844b0e91f4acea5b62956781ed7a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40364e12e5cb85f33d248ca158dabde6c73b2f1b58e4d3c95f454f0bce166c89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7mglc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.976684 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.992525 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.992604 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.992632 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.992662 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:48 crc kubenswrapper[4857]: I1128 13:19:48.992688 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:48Z","lastTransitionTime":"2025-11-28T13:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.012673 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:47Z\\\",\\\"message\\\":\\\"andler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 13:19:46.747865 6865 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1128 13:19:46.747867 6865 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 13:19:46.747931 6865 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 13:19:46.747986 6865 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1128 13:19:46.747999 6865 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1128 13:19:46.748037 6865 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 13:19:46.748053 6865 factory.go:656] Stopping watch factory\\\\nI1128 13:19:46.748058 6865 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 13:19:46.748058 6865 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 13:19:46.748071 6865 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 13:19:46.748087 6865 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 13:19:46.748336 6865 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1128 13:19:46.748461 6865 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1128 13:19:46.748525 6865 ovnkube.go:599] Stopped ovnkube\\\\nI1128 13:19:46.748565 6865 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 13:19:46.748669 6865 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:19:46Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-w25ss_openshift-ovn-kubernetes(bf74e995-2208-43c6-b89d-10318f55cda8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.033191 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed 
to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.050439 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.063181 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.080476 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81125daca139d6b77545a7ffee9064cd2fd693de61ae093e889ec72440be4856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:35Z\\\",\\\"message\\\":\\\"2025-11-28T13:18:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_72286343-065b-4a05-bba3-15806f0e4dc4\\\\n2025-11-28T13:18:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_72286343-065b-4a05-bba3-15806f0e4dc4 to /host/opt/cni/bin/\\\\n2025-11-28T13:18:50Z [verbose] multus-daemon started\\\\n2025-11-28T13:18:50Z [verbose] Readiness Indicator file check\\\\n2025-11-28T13:19:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.094597 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.094660 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.094678 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.094703 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.094725 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:49Z","lastTransitionTime":"2025-11-28T13:19:49Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.096156 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.114310 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.137159 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\"
,\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.156444 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jspn8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ab9b94a-66a7-4d68-8046-d6d97595330d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jspn8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.170990 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2189768-374b-4182-b7d6-855295893cd9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://512c4d00871836ce981f36aec4bd31095bd0d35afbac52016837aa0aad7d337b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5335e99c6c1f70658778ab57280d7b8cb2ab151b9f523bd1cc42354ef53f76ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5335e99c6c1f70658778ab57280d7b8cb2ab151b9f523bd1cc42354ef53f76ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.186889 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.197814 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.197851 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.197865 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.197891 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.197907 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:49Z","lastTransitionTime":"2025-11-28T13:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.301219 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.301295 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.301319 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.301350 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.301375 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:49Z","lastTransitionTime":"2025-11-28T13:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.308615 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.308675 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.308701 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.308908 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:49 crc kubenswrapper[4857]: E1128 13:19:49.308901 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:19:49 crc kubenswrapper[4857]: E1128 13:19:49.309031 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:49 crc kubenswrapper[4857]: E1128 13:19:49.309144 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:19:49 crc kubenswrapper[4857]: E1128 13:19:49.309313 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.405396 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.405434 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.405447 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.405464 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.405477 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:49Z","lastTransitionTime":"2025-11-28T13:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.509275 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.509360 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.509393 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.509427 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.509448 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:49Z","lastTransitionTime":"2025-11-28T13:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.612857 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.612918 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.612932 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.612952 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.612968 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:49Z","lastTransitionTime":"2025-11-28T13:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.715625 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.715687 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.715708 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.715735 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.715876 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:49Z","lastTransitionTime":"2025-11-28T13:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.817908 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.817947 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.817959 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.817976 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.817988 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:49Z","lastTransitionTime":"2025-11-28T13:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.832965 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w25ss_bf74e995-2208-43c6-b89d-10318f55cda8/ovnkube-controller/3.log" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.920666 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.920712 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.920730 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.920778 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:49 crc kubenswrapper[4857]: I1128 13:19:49.920797 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:49Z","lastTransitionTime":"2025-11-28T13:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.023291 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.023366 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.023390 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.023419 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.023443 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:50Z","lastTransitionTime":"2025-11-28T13:19:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.126245 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.126326 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.126368 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.126410 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.126444 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:50Z","lastTransitionTime":"2025-11-28T13:19:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.229563 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.229629 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.229647 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.229673 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.229692 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:50Z","lastTransitionTime":"2025-11-28T13:19:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.332200 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.332336 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.332364 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.332391 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.332427 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:50Z","lastTransitionTime":"2025-11-28T13:19:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.435239 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.435342 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.435368 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.435400 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.435424 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:50Z","lastTransitionTime":"2025-11-28T13:19:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.539234 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.539303 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.539325 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.539355 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.539377 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:50Z","lastTransitionTime":"2025-11-28T13:19:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.642517 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.642603 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.642620 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.642663 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.642681 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:50Z","lastTransitionTime":"2025-11-28T13:19:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.746068 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.746133 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.746151 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.746175 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.746193 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:50Z","lastTransitionTime":"2025-11-28T13:19:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.849057 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.849147 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.849157 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.849171 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.849181 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:50Z","lastTransitionTime":"2025-11-28T13:19:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.952210 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.952243 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.952251 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.952263 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:50 crc kubenswrapper[4857]: I1128 13:19:50.952274 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:50Z","lastTransitionTime":"2025-11-28T13:19:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.055744 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.055808 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.055818 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.055833 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.055843 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:51Z","lastTransitionTime":"2025-11-28T13:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.159321 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.159463 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.159504 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.159536 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.159557 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:51Z","lastTransitionTime":"2025-11-28T13:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.262331 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.262406 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.262424 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.262448 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.262466 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:51Z","lastTransitionTime":"2025-11-28T13:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.308970 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.309052 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.309052 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:51 crc kubenswrapper[4857]: E1128 13:19:51.309144 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:19:51 crc kubenswrapper[4857]: E1128 13:19:51.309264 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.309285 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:51 crc kubenswrapper[4857]: E1128 13:19:51.309428 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:51 crc kubenswrapper[4857]: E1128 13:19:51.309497 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.365467 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.365540 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.365563 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.365598 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.365642 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:51Z","lastTransitionTime":"2025-11-28T13:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.469018 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.469077 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.469100 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.469132 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.469159 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:51Z","lastTransitionTime":"2025-11-28T13:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.691487 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.691735 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.691829 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:51 crc kubenswrapper[4857]: E1128 13:19:51.691878 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:55.691842537 +0000 UTC m=+147.719217724 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.691932 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.692270 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:51 crc kubenswrapper[4857]: E1128 13:19:51.691964 4857 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:19:51 crc kubenswrapper[4857]: E1128 13:19:51.692315 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:19:51 crc kubenswrapper[4857]: E1128 
13:19:51.692355 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:19:51 crc kubenswrapper[4857]: E1128 13:19:51.692381 4857 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:19:51 crc kubenswrapper[4857]: E1128 13:19:51.692398 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:20:55.692369952 +0000 UTC m=+147.719745179 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:19:51 crc kubenswrapper[4857]: E1128 13:19:51.692432 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:19:51 crc kubenswrapper[4857]: E1128 13:19:51.692276 4857 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:19:51 crc kubenswrapper[4857]: E1128 13:19:51.692463 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 13:20:55.692434614 +0000 UTC m=+147.719809811 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:19:51 crc kubenswrapper[4857]: E1128 13:19:51.692454 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:19:51 crc kubenswrapper[4857]: E1128 13:19:51.692579 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:20:55.692532337 +0000 UTC m=+147.719907504 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:19:51 crc kubenswrapper[4857]: E1128 13:19:51.692591 4857 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:19:51 crc kubenswrapper[4857]: E1128 13:19:51.692695 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 13:20:55.692673341 +0000 UTC m=+147.720048668 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.693739 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.693820 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.693843 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.693873 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.693895 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:51Z","lastTransitionTime":"2025-11-28T13:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.797172 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.797247 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.797276 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.797307 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.797332 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:51Z","lastTransitionTime":"2025-11-28T13:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.899862 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.899919 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.899931 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.899949 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:51 crc kubenswrapper[4857]: I1128 13:19:51.899963 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:51Z","lastTransitionTime":"2025-11-28T13:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.002513 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.003045 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.003254 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.003407 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.003552 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:52Z","lastTransitionTime":"2025-11-28T13:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.106856 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.106923 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.106968 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.106992 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.107009 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:52Z","lastTransitionTime":"2025-11-28T13:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.210407 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.210449 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.210462 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.210480 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.210490 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:52Z","lastTransitionTime":"2025-11-28T13:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.314340 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.314418 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.314444 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.314476 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.314501 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:52Z","lastTransitionTime":"2025-11-28T13:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.418417 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.418503 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.418527 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.418563 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.418586 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:52Z","lastTransitionTime":"2025-11-28T13:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.522438 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.522539 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.522561 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.522586 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.522604 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:52Z","lastTransitionTime":"2025-11-28T13:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.626395 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.626815 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.626840 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.626869 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.626890 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:52Z","lastTransitionTime":"2025-11-28T13:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.729529 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.729582 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.729599 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.729625 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.729642 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:52Z","lastTransitionTime":"2025-11-28T13:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.833897 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.833956 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.833973 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.833997 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.834016 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:52Z","lastTransitionTime":"2025-11-28T13:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.937806 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.937874 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.937891 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.937919 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:52 crc kubenswrapper[4857]: I1128 13:19:52.937939 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:52Z","lastTransitionTime":"2025-11-28T13:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.039924 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.039958 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.039966 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.039978 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.039988 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:53Z","lastTransitionTime":"2025-11-28T13:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.136927 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.136994 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.137010 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.137028 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.137041 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:53Z","lastTransitionTime":"2025-11-28T13:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:53 crc kubenswrapper[4857]: E1128 13:19:53.151468 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.155586 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.155630 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.155667 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.155687 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.155699 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:53Z","lastTransitionTime":"2025-11-28T13:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:53 crc kubenswrapper[4857]: E1128 13:19:53.167841 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.174170 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.174252 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.174262 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.174279 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.174293 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:53Z","lastTransitionTime":"2025-11-28T13:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:53 crc kubenswrapper[4857]: E1128 13:19:53.185649 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.189685 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.189744 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.189808 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.189833 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.189850 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:53Z","lastTransitionTime":"2025-11-28T13:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:53 crc kubenswrapper[4857]: E1128 13:19:53.206849 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.211342 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.211456 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.211519 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.211594 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.211658 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:53Z","lastTransitionTime":"2025-11-28T13:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:53 crc kubenswrapper[4857]: E1128 13:19:53.224256 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8c4e6844-6425-4e0e-8b58-4bf189cd3967\\\",\\\"systemUUID\\\":\\\"7380db04-0488-4227-9557-a0513fb82c9e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:53 crc kubenswrapper[4857]: E1128 13:19:53.224483 4857 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.226682 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.226726 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.226743 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.226815 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.226852 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:53Z","lastTransitionTime":"2025-11-28T13:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.309329 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.309351 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.309359 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.309413 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:53 crc kubenswrapper[4857]: E1128 13:19:53.310200 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:53 crc kubenswrapper[4857]: E1128 13:19:53.310405 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:19:53 crc kubenswrapper[4857]: E1128 13:19:53.310508 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:19:53 crc kubenswrapper[4857]: E1128 13:19:53.310672 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.329460 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.329510 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.329522 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.329541 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.329556 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:53Z","lastTransitionTime":"2025-11-28T13:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.432218 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.432280 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.432288 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.432301 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.432309 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:53Z","lastTransitionTime":"2025-11-28T13:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.534901 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.534954 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.534965 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.534981 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.534992 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:53Z","lastTransitionTime":"2025-11-28T13:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.637163 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.637230 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.637248 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.637311 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.637332 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:53Z","lastTransitionTime":"2025-11-28T13:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.740018 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.740058 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.740067 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.740083 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.740092 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:53Z","lastTransitionTime":"2025-11-28T13:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.842775 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.842822 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.842836 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.842851 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.842863 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:53Z","lastTransitionTime":"2025-11-28T13:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.946547 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.946653 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.946682 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.946712 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:53 crc kubenswrapper[4857]: I1128 13:19:53.946736 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:53Z","lastTransitionTime":"2025-11-28T13:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.049395 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.049462 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.049479 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.049505 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.049523 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:54Z","lastTransitionTime":"2025-11-28T13:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.152520 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.152590 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.152612 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.152642 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.152666 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:54Z","lastTransitionTime":"2025-11-28T13:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.255011 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.255084 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.255104 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.255126 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.255146 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:54Z","lastTransitionTime":"2025-11-28T13:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.358346 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.358426 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.358443 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.358469 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.358487 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:54Z","lastTransitionTime":"2025-11-28T13:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.462562 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.462625 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.462643 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.462732 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.462786 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:54Z","lastTransitionTime":"2025-11-28T13:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.565940 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.566063 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.566137 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.566171 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.566194 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:54Z","lastTransitionTime":"2025-11-28T13:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.669694 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.669800 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.669879 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.669957 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.669999 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:54Z","lastTransitionTime":"2025-11-28T13:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.772611 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.772853 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.772899 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.772932 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.772956 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:54Z","lastTransitionTime":"2025-11-28T13:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.876467 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.876523 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.876539 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.876566 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.876601 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:54Z","lastTransitionTime":"2025-11-28T13:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.978790 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.978865 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.978878 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.978895 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:54 crc kubenswrapper[4857]: I1128 13:19:54.978906 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:54Z","lastTransitionTime":"2025-11-28T13:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.081803 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.081886 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.081909 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.081933 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.081953 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:55Z","lastTransitionTime":"2025-11-28T13:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.185528 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.185570 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.185579 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.185595 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.185609 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:55Z","lastTransitionTime":"2025-11-28T13:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.289033 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.289092 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.289107 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.289128 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.289143 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:55Z","lastTransitionTime":"2025-11-28T13:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.309460 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8"
Nov 28 13:19:55 crc kubenswrapper[4857]: E1128 13:19:55.309568 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.309732 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 13:19:55 crc kubenswrapper[4857]: E1128 13:19:55.310186 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.310329 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.310352 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:19:55 crc kubenswrapper[4857]: E1128 13:19:55.310584 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 13:19:55 crc kubenswrapper[4857]: E1128 13:19:55.310821 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.393227 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.393550 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.394002 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.394350 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.394560 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:55Z","lastTransitionTime":"2025-11-28T13:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.497106 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.497179 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.497203 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.497236 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.497258 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:55Z","lastTransitionTime":"2025-11-28T13:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.600844 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.600920 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.600944 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.600972 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.600989 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:55Z","lastTransitionTime":"2025-11-28T13:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.704148 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.704225 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.704247 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.704277 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.704298 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:55Z","lastTransitionTime":"2025-11-28T13:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.807587 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.807651 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.807669 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.807694 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.807712 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:55Z","lastTransitionTime":"2025-11-28T13:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.910817 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.910905 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.910926 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.910953 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:55 crc kubenswrapper[4857]: I1128 13:19:55.910971 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:55Z","lastTransitionTime":"2025-11-28T13:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.014293 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.014347 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.014383 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.014412 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.014434 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:56Z","lastTransitionTime":"2025-11-28T13:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.118569 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.118635 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.118651 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.118674 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.118693 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:56Z","lastTransitionTime":"2025-11-28T13:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.222061 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.222131 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.222154 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.222185 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.222202 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:56Z","lastTransitionTime":"2025-11-28T13:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.324496 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.324535 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.324545 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.324556 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.324565 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:56Z","lastTransitionTime":"2025-11-28T13:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.428537 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.428623 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.428651 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.428682 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.428722 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:56Z","lastTransitionTime":"2025-11-28T13:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.532121 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.532178 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.532202 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.532231 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.532252 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:56Z","lastTransitionTime":"2025-11-28T13:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.635716 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.635843 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.635858 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.636174 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.636205 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:56Z","lastTransitionTime":"2025-11-28T13:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.739651 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.739780 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.739804 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.739831 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.739849 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:56Z","lastTransitionTime":"2025-11-28T13:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.842662 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.843104 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.843281 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.843424 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.843556 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:56Z","lastTransitionTime":"2025-11-28T13:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.947094 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.947175 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.947200 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.947232 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:56 crc kubenswrapper[4857]: I1128 13:19:56.947256 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:56Z","lastTransitionTime":"2025-11-28T13:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.050901 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.050962 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.050973 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.050996 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.051017 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:57Z","lastTransitionTime":"2025-11-28T13:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.153794 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.153863 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.153873 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.153922 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.153935 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:57Z","lastTransitionTime":"2025-11-28T13:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.256355 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.256419 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.256441 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.256472 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.256510 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:57Z","lastTransitionTime":"2025-11-28T13:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.308786 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.308872 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.308962 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 13:19:57 crc kubenswrapper[4857]: E1128 13:19:57.309084 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.309146 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8"
Nov 28 13:19:57 crc kubenswrapper[4857]: E1128 13:19:57.309281 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 13:19:57 crc kubenswrapper[4857]: E1128 13:19:57.309324 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d"
Nov 28 13:19:57 crc kubenswrapper[4857]: E1128 13:19:57.309388 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.359575 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.359646 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.359669 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.359694 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.359715 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:57Z","lastTransitionTime":"2025-11-28T13:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.461663 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.461699 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.461708 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.461722 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.461730 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:57Z","lastTransitionTime":"2025-11-28T13:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.564907 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.564977 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.564995 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.565026 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.565053 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:57Z","lastTransitionTime":"2025-11-28T13:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.667734 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.667860 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.667886 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.667921 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.667946 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:57Z","lastTransitionTime":"2025-11-28T13:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.771163 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.771239 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.771275 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.771303 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.771324 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:57Z","lastTransitionTime":"2025-11-28T13:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.873916 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.873982 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.874000 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.874026 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.874045 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:57Z","lastTransitionTime":"2025-11-28T13:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.976793 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.976883 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.976919 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.976959 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:57 crc kubenswrapper[4857]: I1128 13:19:57.976983 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:57Z","lastTransitionTime":"2025-11-28T13:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.081377 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.081439 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.081450 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.081466 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.081481 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:58Z","lastTransitionTime":"2025-11-28T13:19:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.184378 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.184448 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.184466 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.184489 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.184506 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:58Z","lastTransitionTime":"2025-11-28T13:19:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.287447 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.287509 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.287527 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.287549 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.287568 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:58Z","lastTransitionTime":"2025-11-28T13:19:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.329106 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:58Z is after 2025-08-24T17:21:41Z"
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.346490 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:58Z is after 2025-08-24T17:21:41Z"
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.358784 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a83920b8284c7a565b5a19d1da7e6575ae701fd71c14fcbb6a8ddbdd20cf294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:58Z is after 2025-08-24T17:21:41Z"
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.376723 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-9f578" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d49db47f-8c30-4756-92d5-2ae0be0c8f84\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3d38118fd00b0da73665b64eb2e8a59bc755daf417759491f99db94319c50f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6kn8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-9f578\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:58Z is after 2025-08-24T17:21:41Z"
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.390918 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.390963 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.390977 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.390996 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.391012 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:58Z","lastTransitionTime":"2025-11-28T13:19:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.397148 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aba2e99a-c0de-4ae5-b347-de1565fd9d68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14748bd840c67f9e33f9429423d7d4c29eb8f251fd460b95a946e1a64483ac83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gc5bn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jdgls\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.421099 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-tzg2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1031bdc4-d6c6-4425-805b-506069f5667d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81125daca139d6b77545a7ffee9064cd2fd693de61ae093e889ec72440be4856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:35Z\\\",\\\"message\\\":\\\"2025-11-28T13:18:50+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_72286343-065b-4a05-bba3-15806f0e4dc4\\\\n2025-11-28T13:18:50+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_72286343-065b-4a05-bba3-15806f0e4dc4 to /host/opt/cni/bin/\\\\n2025-11-28T13:18:50Z [verbose] multus-daemon started\\\\n2025-11-28T13:18:50Z [verbose] Readiness Indicator file check\\\\n2025-11-28T13:19:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jdp2n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-tzg2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.438659 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2189768-374b-4182-b7d6-855295893cd9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://512c4d00871836ce981f36aec4bd31095bd0d35afbac52016837aa0aad7d337b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5335e99c6c1f70658778ab57280d7b8cb2ab151b9f523bd1cc42354ef53f76ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5335e99c6c1f70658778ab57280d7b8cb2ab151b9f523bd1cc42354ef53f76ac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.459794 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc611435d3aad61d7cded1c558eceb6d6a18e0f38de67d6b4d04ba27423b403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.473125 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27d6k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9dfc021-dc50-485f-a833-e048ab7a390c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89d11fc7f77b4c4173bb3a3722eb434b6f35107076bebe24b9d91d2ec5cd1440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://655c83b743868aa078f0848895cfd3fc84a716a896b75e20e201b70d080b03fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://327ae30aa8e09cc8301ecab6039a4780ed43b8562b9f8ff8d952722799b91011\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://691081d209bd36a0a5057d94386430e768f249e45d339235d97621fc5863ac73\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcef9e1f3acd46adfa0c6f6ec9156622440c8b4c07c85016cc8c2463f3af37c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0c9aab1b84f7f7c612cb4db220bf58039890980989138366497ffc6e86d0527\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5948e0576aea457adbbcd0d5cff44959a83bf044ecb119bfda9911be64bbd15a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hlnph\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27d6k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.483895 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jspn8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ab9b94a-66a7-4d68-8046-d6d97595330d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qmzxm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:03Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jspn8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.493313 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.493349 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.493359 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.493375 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.493385 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:58Z","lastTransitionTime":"2025-11-28T13:19:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.502058 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0e1d95ef-d750-48a7-a939-70526e1ef20b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ea96a3a4fdaac179a74488f68d2e2fd06d83ed58a7a187e6302cd134bf2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d2030158e6e7c8207d6f7fc9a969200187b7768e8841124f07a3400e50e23dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\
\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aae7f89eaec16e6777ba60dc02bcd3b6803e9c808fdf4a43f102d238702cef00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c85d63d0f03cc2fd7c20c810bdc50ff6af06b839750af8e8af78fdcab32bfd21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a7dd56834eb09d4878f9bdbb41509158339cd6ef4c78606ee9601f59124e96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c54d12ebc6460906d8c05a7b6ab82eecca625f9a9785eb58d9010aef3426e3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":
\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dbc0986b923a8667890bc8c9f9cb076fccce07dce48c2c13e7590a22d9fd96c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4925cb1a76bf604b69f739641441b30a73923fec433445190777513f6e168bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.519472 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f137641-9550-43d3-99c9-fb4d61e9eeb9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:18:42.476255 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:18:42.481936 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2689643572/tls.crt::/tmp/serving-cert-2689643572/tls.key\\\\\\\"\\\\nI1128 13:18:48.002932 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:18:48.059161 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:18:48.059201 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:18:48.059235 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:18:48.059241 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:18:48.088023 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:18:48.088053 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088058 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:18:48.088062 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:18:48.088066 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:18:48.088069 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:18:48.088072 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:18:48.088869 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:18:48.091003 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.532768 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e77ee052-7a38-4552-8c30-30bfebe79716\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc6c67a288ed242cc97c05f9c8a01591c4ea3c3b8bb11e4e76d38bba7dd17f15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3e7e3b66ec5d45bfcbe5f4de7e21b540ba5bcc9859f3753465db8f992b731d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://953e18c7e26d1dbbd6f09ba86ce60483d35bd6bb271a76998acbc9e2d333a034\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65aca5158444f4f1cfa23a526f33afe796d7c390a9d4007ec93a861a8399cb23\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65aca5158444f4f1cfa23a526f33afe796d7c390a9d4007ec93a861a8399cb23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.546790 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:48Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b862590203aea4dc308ee404de5dfce0ff64051aa388cf973a59aee26c59743f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d59477a1fe0f27e8ab7c58e1658a41e6a3c269390de0f5a30f190d86935b6c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.562351 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.575738 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-w8b2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86b54d8d-03e5-4e53-906f-66060d30608d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1524d5827e3b49983eaf4ac38b71f0cb13a8232616126f62f75db0b9fd31d2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6wxd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:51Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-w8b2n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.594074 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f1aff43-ea22-456e-9861-3154457a7ff8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70af22c7c032e6f7e432594e32c1910c7f154438db026d48b9d5dbdf61a454cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c740000f67948f74a8912cad6b6d9af14ec049c235c3b39af6c22cf1d93e6ad6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e63fc11bebe6938173ad3f38da354d4d0910809ae23a286e0a69d1e2e5c1b502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.596057 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.596092 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.596103 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.596117 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.596128 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:58Z","lastTransitionTime":"2025-11-28T13:19:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.620475 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf74e995-2208-43c6-b89d-10318f55cda8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:19:47Z\\\",\\\"message\\\":\\\"andler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 13:19:46.747865 6865 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1128 13:19:46.747867 6865 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 13:19:46.747931 6865 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 13:19:46.747986 6865 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1128 13:19:46.747999 6865 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1128 13:19:46.748037 6865 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 13:19:46.748053 6865 factory.go:656] Stopping watch factory\\\\nI1128 13:19:46.748058 6865 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 13:19:46.748058 6865 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 13:19:46.748071 6865 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 13:19:46.748087 6865 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 13:19:46.748336 6865 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1128 13:19:46.748461 6865 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1128 13:19:46.748525 6865 ovnkube.go:599] Stopped ovnkube\\\\nI1128 13:19:46.748565 6865 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 13:19:46.748669 6865 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:19:46Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-w25ss_openshift-ovn-kubernetes(bf74e995-2208-43c6-b89d-10318f55cda8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:18:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:18:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7nc22\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:18:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-w25ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.636840 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1de596ae-343e-4839-b049-61fb6b8fe7c8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:19:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d95a5273a98c54e77a7eacddf2dd80d3b844b0e91f4acea5b62956781ed7a45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40364e12e5cb85f33d248ca158dabde6c73b2f1b58e4d3c95f454f0bce166c89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:19:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tfx7q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:19:02Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7mglc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:19:58Z is after 2025-08-24T17:21:41Z" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.698960 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.698997 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.699007 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.699022 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.699034 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:58Z","lastTransitionTime":"2025-11-28T13:19:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.801072 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.801110 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.801119 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.801133 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.801142 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:58Z","lastTransitionTime":"2025-11-28T13:19:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.904089 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.904132 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.904144 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.904162 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:58 crc kubenswrapper[4857]: I1128 13:19:58.904176 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:58Z","lastTransitionTime":"2025-11-28T13:19:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.007248 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.007293 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.007304 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.007320 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.007331 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:59Z","lastTransitionTime":"2025-11-28T13:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.110355 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.110421 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.110439 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.110471 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.110493 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:59Z","lastTransitionTime":"2025-11-28T13:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.213921 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.213992 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.214016 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.214047 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.214071 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:59Z","lastTransitionTime":"2025-11-28T13:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.308986 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.309014 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.309026 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.309066 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:19:59 crc kubenswrapper[4857]: E1128 13:19:59.309220 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:19:59 crc kubenswrapper[4857]: E1128 13:19:59.309337 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:19:59 crc kubenswrapper[4857]: E1128 13:19:59.309911 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.311231 4857 scope.go:117] "RemoveContainer" containerID="4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f" Nov 28 13:19:59 crc kubenswrapper[4857]: E1128 13:19:59.311849 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-w25ss_openshift-ovn-kubernetes(bf74e995-2208-43c6-b89d-10318f55cda8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" Nov 28 13:19:59 crc kubenswrapper[4857]: E1128 13:19:59.312131 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.317839 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.318016 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.318050 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.318080 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.318102 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:59Z","lastTransitionTime":"2025-11-28T13:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.421076 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.421107 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.421117 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.421132 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.421143 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:59Z","lastTransitionTime":"2025-11-28T13:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.524305 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.524373 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.524391 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.524416 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.524435 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:59Z","lastTransitionTime":"2025-11-28T13:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.628112 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.628167 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.628186 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.628210 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.628228 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:59Z","lastTransitionTime":"2025-11-28T13:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.731132 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.731193 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.731216 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.731245 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.731268 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:59Z","lastTransitionTime":"2025-11-28T13:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.834262 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.834302 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.834313 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.834331 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.834343 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:59Z","lastTransitionTime":"2025-11-28T13:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.936833 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.936877 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.936889 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.936905 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:19:59 crc kubenswrapper[4857]: I1128 13:19:59.936917 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:19:59Z","lastTransitionTime":"2025-11-28T13:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:20:00 crc kubenswrapper[4857]: I1128 13:20:00.040858 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:20:00 crc kubenswrapper[4857]: I1128 13:20:00.040942 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:20:00 crc kubenswrapper[4857]: I1128 13:20:00.040966 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:20:00 crc kubenswrapper[4857]: I1128 13:20:00.040998 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:20:00 crc kubenswrapper[4857]: I1128 13:20:00.041024 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:20:00Z","lastTransitionTime":"2025-11-28T13:20:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:20:00 crc kubenswrapper[4857]: I1128 13:20:00.144196 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:20:00 crc kubenswrapper[4857]: I1128 13:20:00.144264 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:20:00 crc kubenswrapper[4857]: I1128 13:20:00.144288 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:20:00 crc kubenswrapper[4857]: I1128 13:20:00.144316 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:20:00 crc kubenswrapper[4857]: I1128 13:20:00.144337 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:20:00Z","lastTransitionTime":"2025-11-28T13:20:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:20:00 crc kubenswrapper[4857]: I1128 13:20:00.247747 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:20:00 crc kubenswrapper[4857]: I1128 13:20:00.247839 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:20:00 crc kubenswrapper[4857]: I1128 13:20:00.247856 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:20:00 crc kubenswrapper[4857]: I1128 13:20:00.247878 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:20:00 crc kubenswrapper[4857]: I1128 13:20:00.247902 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:20:00Z","lastTransitionTime":"2025-11-28T13:20:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 13:20:00 crc kubenswrapper[4857]: I1128 13:20:00.351114 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:20:00 crc kubenswrapper[4857]: I1128 13:20:00.351165 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:20:00 crc kubenswrapper[4857]: I1128 13:20:00.351182 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:20:00 crc kubenswrapper[4857]: I1128 13:20:00.351204 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:20:00 crc kubenswrapper[4857]: I1128 13:20:00.351221 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:20:00Z","lastTransitionTime":"2025-11-28T13:20:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
[... the same five "Recording event message" / "Node became not ready" records repeat at ~100 ms intervals through 13:20:03.607; only the timestamps advance ...]
Nov 28 13:20:01 crc kubenswrapper[4857]: I1128 13:20:01.309220 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:20:01 crc kubenswrapper[4857]: I1128 13:20:01.309420 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 13:20:01 crc kubenswrapper[4857]: I1128 13:20:01.309468 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:20:01 crc kubenswrapper[4857]: E1128 13:20:01.309619 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 13:20:01 crc kubenswrapper[4857]: I1128 13:20:01.309638 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8"
Nov 28 13:20:01 crc kubenswrapper[4857]: E1128 13:20:01.309686 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 13:20:01 crc kubenswrapper[4857]: E1128 13:20:01.309857 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d"
Nov 28 13:20:01 crc kubenswrapper[4857]: E1128 13:20:01.309963 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.309368 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8"
Nov 28 13:20:03 crc kubenswrapper[4857]: E1128 13:20:03.309626 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.309678 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.309786 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.310124 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 13:20:03 crc kubenswrapper[4857]: E1128 13:20:03.310172 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 13:20:03 crc kubenswrapper[4857]: E1128 13:20:03.310355 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 13:20:03 crc kubenswrapper[4857]: E1128 13:20:03.310442 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.684252 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-tsvgm"]
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.684836 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-tsvgm"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.689667 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.690297 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.690637 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.695841 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.715543 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ffb0607a-9471-44f2-858b-ad065581d42a-service-ca\") pod \"cluster-version-operator-5c965bbfc6-tsvgm\" (UID: \"ffb0607a-9471-44f2-858b-ad065581d42a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-tsvgm"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.715638 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/ffb0607a-9471-44f2-858b-ad065581d42a-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-tsvgm\" (UID: \"ffb0607a-9471-44f2-858b-ad065581d42a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-tsvgm"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.715733 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/ffb0607a-9471-44f2-858b-ad065581d42a-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-tsvgm\" (UID: \"ffb0607a-9471-44f2-858b-ad065581d42a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-tsvgm"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.715848 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ffb0607a-9471-44f2-858b-ad065581d42a-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-tsvgm\" (UID: \"ffb0607a-9471-44f2-858b-ad065581d42a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-tsvgm"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.715899 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ffb0607a-9471-44f2-858b-ad065581d42a-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-tsvgm\" (UID: \"ffb0607a-9471-44f2-858b-ad065581d42a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-tsvgm"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.784217 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=76.784201915 podStartE2EDuration="1m16.784201915s" podCreationTimestamp="2025-11-28 13:18:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:03.757598965 +0000 UTC m=+95.784974142" watchObservedRunningTime="2025-11-28 13:20:03.784201915 +0000 UTC m=+95.811577082"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.801249 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7mglc" podStartSLOduration=74.801222692 podStartE2EDuration="1m14.801222692s" podCreationTimestamp="2025-11-28 13:18:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:03.798871905 +0000 UTC m=+95.826247092" watchObservedRunningTime="2025-11-28 13:20:03.801222692 +0000 UTC m=+95.828597869"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.816703 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/ffb0607a-9471-44f2-858b-ad065581d42a-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-tsvgm\" (UID: \"ffb0607a-9471-44f2-858b-ad065581d42a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-tsvgm"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.816992 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/ffb0607a-9471-44f2-858b-ad065581d42a-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-tsvgm\" (UID: \"ffb0607a-9471-44f2-858b-ad065581d42a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-tsvgm"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.817105 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ffb0607a-9471-44f2-858b-ad065581d42a-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-tsvgm\" (UID: \"ffb0607a-9471-44f2-858b-ad065581d42a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-tsvgm"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.817236 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ffb0607a-9471-44f2-858b-ad065581d42a-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-tsvgm\" (UID: \"ffb0607a-9471-44f2-858b-ad065581d42a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-tsvgm"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.817381 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ffb0607a-9471-44f2-858b-ad065581d42a-service-ca\") pod \"cluster-version-operator-5c965bbfc6-tsvgm\" (UID: \"ffb0607a-9471-44f2-858b-ad065581d42a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-tsvgm"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.817054 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/ffb0607a-9471-44f2-858b-ad065581d42a-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-tsvgm\" (UID: \"ffb0607a-9471-44f2-858b-ad065581d42a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-tsvgm"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.816816 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/ffb0607a-9471-44f2-858b-ad065581d42a-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-tsvgm\" (UID: \"ffb0607a-9471-44f2-858b-ad065581d42a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-tsvgm"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.818106 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ffb0607a-9471-44f2-858b-ad065581d42a-service-ca\") pod \"cluster-version-operator-5c965bbfc6-tsvgm\" (UID: \"ffb0607a-9471-44f2-858b-ad065581d42a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-tsvgm"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.824701 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ffb0607a-9471-44f2-858b-ad065581d42a-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-tsvgm\" (UID: \"ffb0607a-9471-44f2-858b-ad065581d42a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-tsvgm"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.839290 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ffb0607a-9471-44f2-858b-ad065581d42a-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-tsvgm\" (UID: \"ffb0607a-9471-44f2-858b-ad065581d42a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-tsvgm"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.851449 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-9f578" podStartSLOduration=76.851430077 podStartE2EDuration="1m16.851430077s" podCreationTimestamp="2025-11-28 13:18:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:03.851225151 +0000 UTC m=+95.878600318" watchObservedRunningTime="2025-11-28 13:20:03.851430077 +0000 UTC m=+95.878805244"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.883581 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podStartSLOduration=75.883564016 podStartE2EDuration="1m15.883564016s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:03.864291845 +0000 UTC m=+95.891667012" watchObservedRunningTime="2025-11-28 13:20:03.883564016 +0000 UTC m=+95.910939183"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.883787 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-tzg2g" podStartSLOduration=75.883782152 podStartE2EDuration="1m15.883782152s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:03.883076222 +0000 UTC m=+95.910451389" watchObservedRunningTime="2025-11-28 13:20:03.883782152 +0000 UTC m=+95.911157309"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.895275 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=20.89525527 podStartE2EDuration="20.89525527s" podCreationTimestamp="2025-11-28 13:19:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:03.895132386 +0000 UTC m=+95.922507553" watchObservedRunningTime="2025-11-28 13:20:03.89525527 +0000 UTC m=+95.922630437"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.938446 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-27d6k" podStartSLOduration=75.938431664 podStartE2EDuration="1m15.938431664s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:03.927730908 +0000 UTC m=+95.955106075" watchObservedRunningTime="2025-11-28 13:20:03.938431664 +0000 UTC m=+95.965806831"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.962220 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=76.962204102 podStartE2EDuration="1m16.962204102s" podCreationTimestamp="2025-11-28 13:18:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:03.960823663 +0000 UTC m=+95.988198830" watchObservedRunningTime="2025-11-28 13:20:03.962204102 +0000 UTC m=+95.989579269"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.987165 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=75.987147705 podStartE2EDuration="1m15.987147705s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:03.975246505 +0000 UTC m=+96.002621692" watchObservedRunningTime="2025-11-28 13:20:03.987147705 +0000 UTC m=+96.014522872"
Nov 28 13:20:03 crc kubenswrapper[4857]: I1128 13:20:03.999727 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=42.999707624 podStartE2EDuration="42.999707624s" podCreationTimestamp="2025-11-28 13:19:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:03.988108883 +0000 UTC m=+96.015484050" watchObservedRunningTime="2025-11-28 13:20:03.999707624 +0000 UTC m=+96.027082791"
Nov 28 13:20:04 crc kubenswrapper[4857]: I1128 13:20:04.021654 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-tsvgm"
Nov 28 13:20:04 crc kubenswrapper[4857]: I1128 13:20:04.022587 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-w8b2n" podStartSLOduration=76.022566648 podStartE2EDuration="1m16.022566648s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:04.021486707 +0000 UTC m=+96.048861874" watchObservedRunningTime="2025-11-28 13:20:04.022566648 +0000 UTC m=+96.049941805"
Nov 28 13:20:04 crc kubenswrapper[4857]: I1128 13:20:04.890392 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-tsvgm" event={"ID":"ffb0607a-9471-44f2-858b-ad065581d42a","Type":"ContainerStarted","Data":"9ec34f46e27b43be4f66f34522b9fa06343b45f893cc98dd7b0adaf807db01c0"}
Nov 28 13:20:04 crc kubenswrapper[4857]: I1128 13:20:04.890441 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-tsvgm" event={"ID":"ffb0607a-9471-44f2-858b-ad065581d42a","Type":"ContainerStarted","Data":"0abb74d1eb3a144d2a15b127dd84ec6648fead7e179decd43c199d2205b7e0bb"}
Nov 28 13:20:04 crc kubenswrapper[4857]: I1128 13:20:04.913557 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-tsvgm" podStartSLOduration=76.913533926 podStartE2EDuration="1m16.913533926s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:04.91227259 +0000 UTC m=+96.939647827" watchObservedRunningTime="2025-11-28 13:20:04.913533926 +0000 UTC m=+96.940909133"
Nov 28 13:20:05 crc kubenswrapper[4857]: I1128 13:20:05.308955 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:20:05 crc kubenswrapper[4857]: I1128 13:20:05.308994 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8"
Nov 28 13:20:05 crc kubenswrapper[4857]: I1128 13:20:05.308967 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:20:05 crc kubenswrapper[4857]: I1128 13:20:05.309123 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 13:20:05 crc kubenswrapper[4857]: E1128 13:20:05.309078 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 13:20:05 crc kubenswrapper[4857]: E1128 13:20:05.309262 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d"
Nov 28 13:20:05 crc kubenswrapper[4857]: E1128 13:20:05.309480 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 13:20:05 crc kubenswrapper[4857]: E1128 13:20:05.309595 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 13:20:07 crc kubenswrapper[4857]: I1128 13:20:07.763413 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs\") pod \"network-metrics-daemon-jspn8\" (UID: \"9ab9b94a-66a7-4d68-8046-d6d97595330d\") " pod="openshift-multus/network-metrics-daemon-jspn8"
Nov 28 13:20:07 crc kubenswrapper[4857]: E1128 13:20:07.763557 4857 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 28 13:20:07 crc kubenswrapper[4857]: E1128 13:20:07.763619 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs podName:9ab9b94a-66a7-4d68-8046-d6d97595330d nodeName:}" failed. No retries permitted until 2025-11-28 13:21:11.763599653 +0000 UTC m=+163.790974820 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs") pod "network-metrics-daemon-jspn8" (UID: "9ab9b94a-66a7-4d68-8046-d6d97595330d") : object "openshift-multus"/"metrics-daemon-secret" not registered
[... the same four "No sandbox for pod can be found" / "Error syncing pod, skipping" record groups for network-check-target-xd92c, network-metrics-daemon-jspn8, networking-console-plugin-85b44fc459-gdk6g, and network-check-source-55646444c4-trplf recur at 13:20:07, 13:20:09, and 13:20:11, where the log breaks off mid-record ...]
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:20:11 crc kubenswrapper[4857]: E1128 13:20:11.309721 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:20:11 crc kubenswrapper[4857]: I1128 13:20:11.310174 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:20:11 crc kubenswrapper[4857]: E1128 13:20:11.310374 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:20:13 crc kubenswrapper[4857]: I1128 13:20:13.308999 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:20:13 crc kubenswrapper[4857]: I1128 13:20:13.309021 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:20:13 crc kubenswrapper[4857]: I1128 13:20:13.309080 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:20:13 crc kubenswrapper[4857]: I1128 13:20:13.309106 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:20:13 crc kubenswrapper[4857]: E1128 13:20:13.309299 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:20:13 crc kubenswrapper[4857]: E1128 13:20:13.309555 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:20:13 crc kubenswrapper[4857]: E1128 13:20:13.309976 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:20:13 crc kubenswrapper[4857]: E1128 13:20:13.310076 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:20:13 crc kubenswrapper[4857]: I1128 13:20:13.310210 4857 scope.go:117] "RemoveContainer" containerID="4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f" Nov 28 13:20:13 crc kubenswrapper[4857]: E1128 13:20:13.310382 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-w25ss_openshift-ovn-kubernetes(bf74e995-2208-43c6-b89d-10318f55cda8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" Nov 28 13:20:15 crc kubenswrapper[4857]: I1128 13:20:15.308873 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:20:15 crc kubenswrapper[4857]: I1128 13:20:15.308920 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:20:15 crc kubenswrapper[4857]: I1128 13:20:15.309646 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:20:15 crc kubenswrapper[4857]: E1128 13:20:15.309931 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:20:15 crc kubenswrapper[4857]: I1128 13:20:15.309977 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:20:15 crc kubenswrapper[4857]: E1128 13:20:15.310181 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:20:15 crc kubenswrapper[4857]: E1128 13:20:15.310282 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:20:15 crc kubenswrapper[4857]: E1128 13:20:15.310616 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:20:17 crc kubenswrapper[4857]: I1128 13:20:17.309362 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:20:17 crc kubenswrapper[4857]: I1128 13:20:17.309393 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:20:17 crc kubenswrapper[4857]: E1128 13:20:17.309911 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:20:17 crc kubenswrapper[4857]: I1128 13:20:17.309443 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:20:17 crc kubenswrapper[4857]: E1128 13:20:17.310095 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:20:17 crc kubenswrapper[4857]: I1128 13:20:17.309469 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:20:17 crc kubenswrapper[4857]: E1128 13:20:17.310189 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:20:17 crc kubenswrapper[4857]: E1128 13:20:17.310474 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:20:19 crc kubenswrapper[4857]: I1128 13:20:19.309111 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:20:19 crc kubenswrapper[4857]: I1128 13:20:19.309165 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:20:19 crc kubenswrapper[4857]: E1128 13:20:19.309319 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:20:19 crc kubenswrapper[4857]: I1128 13:20:19.309400 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:20:19 crc kubenswrapper[4857]: E1128 13:20:19.309560 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:20:19 crc kubenswrapper[4857]: I1128 13:20:19.309607 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:20:19 crc kubenswrapper[4857]: E1128 13:20:19.309928 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:20:19 crc kubenswrapper[4857]: E1128 13:20:19.310018 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:20:21 crc kubenswrapper[4857]: I1128 13:20:21.309558 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:20:21 crc kubenswrapper[4857]: I1128 13:20:21.309599 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:20:21 crc kubenswrapper[4857]: I1128 13:20:21.309622 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:20:21 crc kubenswrapper[4857]: I1128 13:20:21.309727 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:20:21 crc kubenswrapper[4857]: E1128 13:20:21.309928 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:20:21 crc kubenswrapper[4857]: E1128 13:20:21.310058 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:20:21 crc kubenswrapper[4857]: E1128 13:20:21.310155 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:20:21 crc kubenswrapper[4857]: E1128 13:20:21.310214 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:20:22 crc kubenswrapper[4857]: I1128 13:20:22.951989 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-tzg2g_1031bdc4-d6c6-4425-805b-506069f5667d/kube-multus/1.log" Nov 28 13:20:22 crc kubenswrapper[4857]: I1128 13:20:22.953511 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-tzg2g_1031bdc4-d6c6-4425-805b-506069f5667d/kube-multus/0.log" Nov 28 13:20:22 crc kubenswrapper[4857]: I1128 13:20:22.953562 4857 generic.go:334] "Generic (PLEG): container finished" podID="1031bdc4-d6c6-4425-805b-506069f5667d" containerID="81125daca139d6b77545a7ffee9064cd2fd693de61ae093e889ec72440be4856" exitCode=1 Nov 28 13:20:22 crc kubenswrapper[4857]: I1128 13:20:22.953591 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-tzg2g" event={"ID":"1031bdc4-d6c6-4425-805b-506069f5667d","Type":"ContainerDied","Data":"81125daca139d6b77545a7ffee9064cd2fd693de61ae093e889ec72440be4856"} Nov 28 13:20:22 crc kubenswrapper[4857]: I1128 13:20:22.953630 4857 scope.go:117] "RemoveContainer" containerID="88b9109c0366a4c5c60c843f4d0ba11029143b141432963c34d8c8e75a566f6a" Nov 28 13:20:22 crc kubenswrapper[4857]: I1128 13:20:22.954397 4857 scope.go:117] "RemoveContainer" containerID="81125daca139d6b77545a7ffee9064cd2fd693de61ae093e889ec72440be4856" Nov 28 13:20:22 crc kubenswrapper[4857]: E1128 13:20:22.954834 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-tzg2g_openshift-multus(1031bdc4-d6c6-4425-805b-506069f5667d)\"" pod="openshift-multus/multus-tzg2g" podUID="1031bdc4-d6c6-4425-805b-506069f5667d" Nov 28 13:20:23 crc kubenswrapper[4857]: I1128 13:20:23.309278 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:20:23 crc kubenswrapper[4857]: I1128 13:20:23.309405 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:20:23 crc kubenswrapper[4857]: E1128 13:20:23.309486 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:20:23 crc kubenswrapper[4857]: E1128 13:20:23.309635 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:20:23 crc kubenswrapper[4857]: I1128 13:20:23.309940 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:20:23 crc kubenswrapper[4857]: I1128 13:20:23.309962 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:20:23 crc kubenswrapper[4857]: E1128 13:20:23.310064 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:20:23 crc kubenswrapper[4857]: E1128 13:20:23.310125 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:20:23 crc kubenswrapper[4857]: I1128 13:20:23.958882 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-tzg2g_1031bdc4-d6c6-4425-805b-506069f5667d/kube-multus/1.log" Nov 28 13:20:25 crc kubenswrapper[4857]: I1128 13:20:25.308891 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:20:25 crc kubenswrapper[4857]: E1128 13:20:25.309067 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:20:25 crc kubenswrapper[4857]: I1128 13:20:25.309139 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:20:25 crc kubenswrapper[4857]: I1128 13:20:25.309208 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:20:25 crc kubenswrapper[4857]: I1128 13:20:25.309186 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:20:25 crc kubenswrapper[4857]: E1128 13:20:25.309357 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:20:25 crc kubenswrapper[4857]: E1128 13:20:25.309514 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:20:25 crc kubenswrapper[4857]: E1128 13:20:25.309645 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:20:27 crc kubenswrapper[4857]: I1128 13:20:27.309157 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:20:27 crc kubenswrapper[4857]: I1128 13:20:27.309261 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:20:27 crc kubenswrapper[4857]: I1128 13:20:27.309186 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:20:27 crc kubenswrapper[4857]: I1128 13:20:27.309172 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:20:27 crc kubenswrapper[4857]: E1128 13:20:27.309400 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:20:27 crc kubenswrapper[4857]: E1128 13:20:27.309548 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:20:27 crc kubenswrapper[4857]: E1128 13:20:27.309689 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:20:27 crc kubenswrapper[4857]: E1128 13:20:27.309884 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:20:28 crc kubenswrapper[4857]: E1128 13:20:28.266672 4857 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 28 13:20:28 crc kubenswrapper[4857]: I1128 13:20:28.311438 4857 scope.go:117] "RemoveContainer" containerID="4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f" Nov 28 13:20:28 crc kubenswrapper[4857]: E1128 13:20:28.409442 4857 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 13:20:28 crc kubenswrapper[4857]: I1128 13:20:28.979679 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w25ss_bf74e995-2208-43c6-b89d-10318f55cda8/ovnkube-controller/3.log" Nov 28 13:20:28 crc kubenswrapper[4857]: I1128 13:20:28.983474 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerStarted","Data":"f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99"} Nov 28 13:20:28 crc kubenswrapper[4857]: I1128 13:20:28.983912 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:20:29 crc kubenswrapper[4857]: I1128 13:20:29.014780 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" podStartSLOduration=101.01476593 podStartE2EDuration="1m41.01476593s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:29.013974857 +0000 UTC m=+121.041350024" watchObservedRunningTime="2025-11-28 13:20:29.01476593 +0000 UTC m=+121.042141097" Nov 28 13:20:29 crc kubenswrapper[4857]: I1128 13:20:29.092390 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-jspn8"] Nov 28 13:20:29 crc kubenswrapper[4857]: I1128 13:20:29.093625 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:20:29 crc kubenswrapper[4857]: E1128 13:20:29.093801 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:20:29 crc kubenswrapper[4857]: I1128 13:20:29.308823 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:20:29 crc kubenswrapper[4857]: I1128 13:20:29.308887 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:20:29 crc kubenswrapper[4857]: E1128 13:20:29.308943 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:20:29 crc kubenswrapper[4857]: I1128 13:20:29.308822 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:20:29 crc kubenswrapper[4857]: E1128 13:20:29.309088 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:20:29 crc kubenswrapper[4857]: E1128 13:20:29.309139 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:20:31 crc kubenswrapper[4857]: I1128 13:20:31.309076 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:20:31 crc kubenswrapper[4857]: E1128 13:20:31.309188 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:20:31 crc kubenswrapper[4857]: I1128 13:20:31.309339 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:20:31 crc kubenswrapper[4857]: E1128 13:20:31.309380 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:20:31 crc kubenswrapper[4857]: I1128 13:20:31.309466 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:20:31 crc kubenswrapper[4857]: E1128 13:20:31.309508 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:20:31 crc kubenswrapper[4857]: I1128 13:20:31.309590 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:20:31 crc kubenswrapper[4857]: E1128 13:20:31.309660 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:20:33 crc kubenswrapper[4857]: I1128 13:20:33.309062 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:20:33 crc kubenswrapper[4857]: I1128 13:20:33.309072 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:20:33 crc kubenswrapper[4857]: I1128 13:20:33.309082 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:20:33 crc kubenswrapper[4857]: I1128 13:20:33.309091 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:20:33 crc kubenswrapper[4857]: E1128 13:20:33.309252 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:20:33 crc kubenswrapper[4857]: E1128 13:20:33.309333 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:20:33 crc kubenswrapper[4857]: E1128 13:20:33.309400 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:20:33 crc kubenswrapper[4857]: E1128 13:20:33.309461 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:20:33 crc kubenswrapper[4857]: E1128 13:20:33.411500 4857 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 13:20:35 crc kubenswrapper[4857]: I1128 13:20:35.308580 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:20:35 crc kubenswrapper[4857]: I1128 13:20:35.308646 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:20:35 crc kubenswrapper[4857]: E1128 13:20:35.309183 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:20:35 crc kubenswrapper[4857]: I1128 13:20:35.308931 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:20:35 crc kubenswrapper[4857]: I1128 13:20:35.309300 4857 scope.go:117] "RemoveContainer" containerID="81125daca139d6b77545a7ffee9064cd2fd693de61ae093e889ec72440be4856" Nov 28 13:20:35 crc kubenswrapper[4857]: E1128 13:20:35.309333 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:20:35 crc kubenswrapper[4857]: I1128 13:20:35.308693 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:20:35 crc kubenswrapper[4857]: E1128 13:20:35.309545 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:20:35 crc kubenswrapper[4857]: E1128 13:20:35.309692 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:20:37 crc kubenswrapper[4857]: I1128 13:20:37.013404 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-tzg2g_1031bdc4-d6c6-4425-805b-506069f5667d/kube-multus/1.log" Nov 28 13:20:37 crc kubenswrapper[4857]: I1128 13:20:37.013475 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-tzg2g" event={"ID":"1031bdc4-d6c6-4425-805b-506069f5667d","Type":"ContainerStarted","Data":"255dccf9694a6567337261a11825e45d80269685bac4522118e6cb077d34971e"} Nov 28 13:20:37 crc kubenswrapper[4857]: I1128 13:20:37.308594 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:20:37 crc kubenswrapper[4857]: I1128 13:20:37.308667 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:20:37 crc kubenswrapper[4857]: E1128 13:20:37.308827 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jspn8" podUID="9ab9b94a-66a7-4d68-8046-d6d97595330d" Nov 28 13:20:37 crc kubenswrapper[4857]: I1128 13:20:37.308866 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:20:37 crc kubenswrapper[4857]: E1128 13:20:37.309010 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:20:37 crc kubenswrapper[4857]: E1128 13:20:37.309109 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:20:37 crc kubenswrapper[4857]: I1128 13:20:37.309303 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:20:37 crc kubenswrapper[4857]: E1128 13:20:37.309403 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:20:39 crc kubenswrapper[4857]: I1128 13:20:39.309322 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:20:39 crc kubenswrapper[4857]: I1128 13:20:39.309416 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:20:39 crc kubenswrapper[4857]: I1128 13:20:39.309453 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8" Nov 28 13:20:39 crc kubenswrapper[4857]: I1128 13:20:39.309431 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:20:39 crc kubenswrapper[4857]: I1128 13:20:39.313284 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 28 13:20:39 crc kubenswrapper[4857]: I1128 13:20:39.313363 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 28 13:20:39 crc kubenswrapper[4857]: I1128 13:20:39.313546 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 28 13:20:39 crc kubenswrapper[4857]: I1128 13:20:39.313562 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 28 13:20:39 crc kubenswrapper[4857]: I1128 13:20:39.314171 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 28 13:20:39 crc kubenswrapper[4857]: I1128 13:20:39.316673 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.298273 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.343132 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.343721 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.347772 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.348590 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.349178 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.349436 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-rc8cq"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.349569 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.349781 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.350491 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-wn59p"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.351088 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wn59p" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.351299 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-fx2d9"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.353069 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-rc8cq" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.355150 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.359804 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-x8g9t"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.360725 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-x8g9t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.361181 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-fx2d9" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.384112 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.384302 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.384422 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.384513 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.384636 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.384668 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.384746 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.384870 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.384901 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.384440 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.385060 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.385165 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.385244 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.387046 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.387296 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.387347 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.391208 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d1c0a9ad-6310-4b36-82cf-775aad2a3232-auth-proxy-config\") pod \"machine-approver-56656f9798-wn59p\" (UID: \"d1c0a9ad-6310-4b36-82cf-775aad2a3232\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wn59p" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.391259 
4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/7f4befc0-7e37-4e54-ba59-0e4f698980b6-etcd-client\") pod \"etcd-operator-b45778765-x8g9t\" (UID: \"7f4befc0-7e37-4e54-ba59-0e4f698980b6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x8g9t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.391334 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3a15c6a7-1b90-42e1-8e1d-ccdba481e6db-serving-cert\") pod \"console-operator-58897d9998-fx2d9\" (UID: \"3a15c6a7-1b90-42e1-8e1d-ccdba481e6db\") " pod="openshift-console-operator/console-operator-58897d9998-fx2d9" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.391362 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d1c0a9ad-6310-4b36-82cf-775aad2a3232-config\") pod \"machine-approver-56656f9798-wn59p\" (UID: \"d1c0a9ad-6310-4b36-82cf-775aad2a3232\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wn59p" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.391385 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6pkhr\" (UniqueName: \"kubernetes.io/projected/de8cf09d-8247-4f1f-bce9-01472e9ee181-kube-api-access-6pkhr\") pod \"downloads-7954f5f757-rc8cq\" (UID: \"de8cf09d-8247-4f1f-bce9-01472e9ee181\") " pod="openshift-console/downloads-7954f5f757-rc8cq" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.391409 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/7f4befc0-7e37-4e54-ba59-0e4f698980b6-etcd-ca\") pod \"etcd-operator-b45778765-x8g9t\" (UID: \"7f4befc0-7e37-4e54-ba59-0e4f698980b6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x8g9t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.391444 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-client-ca\") pod \"route-controller-manager-6576b87f9c-fv2tb\" (UID: \"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.391477 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-config\") pod \"route-controller-manager-6576b87f9c-fv2tb\" (UID: \"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.391506 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3a15c6a7-1b90-42e1-8e1d-ccdba481e6db-trusted-ca\") pod \"console-operator-58897d9998-fx2d9\" (UID: \"3a15c6a7-1b90-42e1-8e1d-ccdba481e6db\") " pod="openshift-console-operator/console-operator-58897d9998-fx2d9" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.391537 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2fdf\" 
(UniqueName: \"kubernetes.io/projected/3a15c6a7-1b90-42e1-8e1d-ccdba481e6db-kube-api-access-g2fdf\") pod \"console-operator-58897d9998-fx2d9\" (UID: \"3a15c6a7-1b90-42e1-8e1d-ccdba481e6db\") " pod="openshift-console-operator/console-operator-58897d9998-fx2d9" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.391568 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f4befc0-7e37-4e54-ba59-0e4f698980b6-config\") pod \"etcd-operator-b45778765-x8g9t\" (UID: \"7f4befc0-7e37-4e54-ba59-0e4f698980b6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x8g9t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.391592 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/d1c0a9ad-6310-4b36-82cf-775aad2a3232-machine-approver-tls\") pod \"machine-approver-56656f9798-wn59p\" (UID: \"d1c0a9ad-6310-4b36-82cf-775aad2a3232\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wn59p" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.391619 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/7f4befc0-7e37-4e54-ba59-0e4f698980b6-etcd-service-ca\") pod \"etcd-operator-b45778765-x8g9t\" (UID: \"7f4befc0-7e37-4e54-ba59-0e4f698980b6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x8g9t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.391645 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czf6g\" (UniqueName: \"kubernetes.io/projected/7f4befc0-7e37-4e54-ba59-0e4f698980b6-kube-api-access-czf6g\") pod \"etcd-operator-b45778765-x8g9t\" (UID: \"7f4befc0-7e37-4e54-ba59-0e4f698980b6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x8g9t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.391670 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrl5j\" (UniqueName: \"kubernetes.io/projected/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-kube-api-access-rrl5j\") pod \"route-controller-manager-6576b87f9c-fv2tb\" (UID: \"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.391702 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7f4befc0-7e37-4e54-ba59-0e4f698980b6-serving-cert\") pod \"etcd-operator-b45778765-x8g9t\" (UID: \"7f4befc0-7e37-4e54-ba59-0e4f698980b6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x8g9t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.391733 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-serving-cert\") pod \"route-controller-manager-6576b87f9c-fv2tb\" (UID: \"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.393983 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tlbl5\" (UniqueName: 
\"kubernetes.io/projected/d1c0a9ad-6310-4b36-82cf-775aad2a3232-kube-api-access-tlbl5\") pod \"machine-approver-56656f9798-wn59p\" (UID: \"d1c0a9ad-6310-4b36-82cf-775aad2a3232\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wn59p" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.394049 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a15c6a7-1b90-42e1-8e1d-ccdba481e6db-config\") pod \"console-operator-58897d9998-fx2d9\" (UID: \"3a15c6a7-1b90-42e1-8e1d-ccdba481e6db\") " pod="openshift-console-operator/console-operator-58897d9998-fx2d9" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.395870 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zpqmp"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.396578 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.396873 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-t448t"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.406214 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.407338 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.407900 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.408414 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.408795 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.408948 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.409095 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.409212 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.409316 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.409431 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.409774 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.410038 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 
13:20:44.411939 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.413110 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-hzw48"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.413946 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.414508 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pkcmd"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.415034 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pkcmd" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.416162 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.420493 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.420899 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.421142 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.421871 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.422023 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.423433 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.423677 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.423928 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.424031 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.424515 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c544t"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.425946 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c544t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.430918 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-244qv"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.431244 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.431285 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.456876 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.457063 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.457202 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.434704 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.443682 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.462358 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-pxdz7"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.443831 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.443982 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.445978 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.446071 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.446157 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.446199 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.448707 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.451119 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.454076 4857 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.455060 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.460420 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.474979 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-244qv" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.477603 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.479265 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77ck4"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.479559 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-gqtr2"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.480009 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-7plbl"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.480292 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-fvj5x"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.480710 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-2xzcp"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.481269 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-n485x"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.481588 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.482040 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-t9k6q"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.482434 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-t9k6q" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.482910 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-n485x" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.483674 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.483981 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-pxdz7" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.484185 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77ck4" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.484359 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-gqtr2" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.484540 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.485103 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.490407 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-tz5lk"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.491048 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-tz5lk" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.493555 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-rc8cq"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.494554 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/15243bb2-b17a-4ad4-b4f9-fbb592883207-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-t9k6q\" (UID: \"15243bb2-b17a-4ad4-b4f9-fbb592883207\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t9k6q" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.495558 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/15243bb2-b17a-4ad4-b4f9-fbb592883207-serving-cert\") pod \"authentication-operator-69f744f599-t9k6q\" (UID: \"15243bb2-b17a-4ad4-b4f9-fbb592883207\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t9k6q" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.495586 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56bnk\" (UniqueName: \"kubernetes.io/projected/7e845cce-5c0e-4fad-bd24-6ff321ea3c02-kube-api-access-56bnk\") pod \"cluster-samples-operator-665b6dd947-c544t\" (UID: \"7e845cce-5c0e-4fad-bd24-6ff321ea3c02\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c544t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.495619 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shxrp\" (UniqueName: \"kubernetes.io/projected/15243bb2-b17a-4ad4-b4f9-fbb592883207-kube-api-access-shxrp\") pod \"authentication-operator-69f744f599-t9k6q\" (UID: \"15243bb2-b17a-4ad4-b4f9-fbb592883207\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t9k6q" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.495642 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/15243bb2-b17a-4ad4-b4f9-fbb592883207-service-ca-bundle\") pod \"authentication-operator-69f744f599-t9k6q\" (UID: \"15243bb2-b17a-4ad4-b4f9-fbb592883207\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t9k6q" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.495662 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/3a15c6a7-1b90-42e1-8e1d-ccdba481e6db-serving-cert\") pod \"console-operator-58897d9998-fx2d9\" (UID: \"3a15c6a7-1b90-42e1-8e1d-ccdba481e6db\") " pod="openshift-console-operator/console-operator-58897d9998-fx2d9" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.495680 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d1c0a9ad-6310-4b36-82cf-775aad2a3232-config\") pod \"machine-approver-56656f9798-wn59p\" (UID: \"d1c0a9ad-6310-4b36-82cf-775aad2a3232\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wn59p" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.495696 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6pkhr\" (UniqueName: \"kubernetes.io/projected/de8cf09d-8247-4f1f-bce9-01472e9ee181-kube-api-access-6pkhr\") pod \"downloads-7954f5f757-rc8cq\" (UID: \"de8cf09d-8247-4f1f-bce9-01472e9ee181\") " pod="openshift-console/downloads-7954f5f757-rc8cq" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.495712 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/7f4befc0-7e37-4e54-ba59-0e4f698980b6-etcd-ca\") pod \"etcd-operator-b45778765-x8g9t\" (UID: \"7f4befc0-7e37-4e54-ba59-0e4f698980b6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x8g9t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.495734 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-config\") pod \"route-controller-manager-6576b87f9c-fv2tb\" (UID: \"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.495767 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-client-ca\") pod \"route-controller-manager-6576b87f9c-fv2tb\" (UID: \"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.495785 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3a15c6a7-1b90-42e1-8e1d-ccdba481e6db-trusted-ca\") pod \"console-operator-58897d9998-fx2d9\" (UID: \"3a15c6a7-1b90-42e1-8e1d-ccdba481e6db\") " pod="openshift-console-operator/console-operator-58897d9998-fx2d9" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.495802 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g2fdf\" (UniqueName: \"kubernetes.io/projected/3a15c6a7-1b90-42e1-8e1d-ccdba481e6db-kube-api-access-g2fdf\") pod \"console-operator-58897d9998-fx2d9\" (UID: \"3a15c6a7-1b90-42e1-8e1d-ccdba481e6db\") " pod="openshift-console-operator/console-operator-58897d9998-fx2d9" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.495819 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f4befc0-7e37-4e54-ba59-0e4f698980b6-config\") pod \"etcd-operator-b45778765-x8g9t\" (UID: \"7f4befc0-7e37-4e54-ba59-0e4f698980b6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x8g9t" Nov 28 13:20:44 crc 
kubenswrapper[4857]: I1128 13:20:44.495835 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/d1c0a9ad-6310-4b36-82cf-775aad2a3232-machine-approver-tls\") pod \"machine-approver-56656f9798-wn59p\" (UID: \"d1c0a9ad-6310-4b36-82cf-775aad2a3232\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wn59p" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.495854 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/7f4befc0-7e37-4e54-ba59-0e4f698980b6-etcd-service-ca\") pod \"etcd-operator-b45778765-x8g9t\" (UID: \"7f4befc0-7e37-4e54-ba59-0e4f698980b6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x8g9t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.495871 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czf6g\" (UniqueName: \"kubernetes.io/projected/7f4befc0-7e37-4e54-ba59-0e4f698980b6-kube-api-access-czf6g\") pod \"etcd-operator-b45778765-x8g9t\" (UID: \"7f4befc0-7e37-4e54-ba59-0e4f698980b6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x8g9t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.495892 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrl5j\" (UniqueName: \"kubernetes.io/projected/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-kube-api-access-rrl5j\") pod \"route-controller-manager-6576b87f9c-fv2tb\" (UID: \"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.495916 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/15243bb2-b17a-4ad4-b4f9-fbb592883207-config\") pod \"authentication-operator-69f744f599-t9k6q\" (UID: \"15243bb2-b17a-4ad4-b4f9-fbb592883207\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t9k6q" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.495941 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7f4befc0-7e37-4e54-ba59-0e4f698980b6-serving-cert\") pod \"etcd-operator-b45778765-x8g9t\" (UID: \"7f4befc0-7e37-4e54-ba59-0e4f698980b6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x8g9t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.495959 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-serving-cert\") pod \"route-controller-manager-6576b87f9c-fv2tb\" (UID: \"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.495981 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tlbl5\" (UniqueName: \"kubernetes.io/projected/d1c0a9ad-6310-4b36-82cf-775aad2a3232-kube-api-access-tlbl5\") pod \"machine-approver-56656f9798-wn59p\" (UID: \"d1c0a9ad-6310-4b36-82cf-775aad2a3232\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wn59p" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.496003 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/3a15c6a7-1b90-42e1-8e1d-ccdba481e6db-config\") pod \"console-operator-58897d9998-fx2d9\" (UID: \"3a15c6a7-1b90-42e1-8e1d-ccdba481e6db\") " pod="openshift-console-operator/console-operator-58897d9998-fx2d9" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.496026 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d1c0a9ad-6310-4b36-82cf-775aad2a3232-auth-proxy-config\") pod \"machine-approver-56656f9798-wn59p\" (UID: \"d1c0a9ad-6310-4b36-82cf-775aad2a3232\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wn59p" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.496042 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/7f4befc0-7e37-4e54-ba59-0e4f698980b6-etcd-client\") pod \"etcd-operator-b45778765-x8g9t\" (UID: \"7f4befc0-7e37-4e54-ba59-0e4f698980b6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x8g9t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.496065 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/7e845cce-5c0e-4fad-bd24-6ff321ea3c02-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-c544t\" (UID: \"7e845cce-5c0e-4fad-bd24-6ff321ea3c02\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c544t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.496483 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.497084 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.497429 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mfb8j"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.497554 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.497689 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.497864 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.498011 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.498053 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mfb8j" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.498194 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.499301 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/7f4befc0-7e37-4e54-ba59-0e4f698980b6-etcd-service-ca\") pod \"etcd-operator-b45778765-x8g9t\" (UID: \"7f4befc0-7e37-4e54-ba59-0e4f698980b6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x8g9t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.500160 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-client-ca\") pod \"route-controller-manager-6576b87f9c-fv2tb\" (UID: \"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.500441 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3a15c6a7-1b90-42e1-8e1d-ccdba481e6db-trusted-ca\") pod \"console-operator-58897d9998-fx2d9\" (UID: \"3a15c6a7-1b90-42e1-8e1d-ccdba481e6db\") " pod="openshift-console-operator/console-operator-58897d9998-fx2d9" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.501056 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d1c0a9ad-6310-4b36-82cf-775aad2a3232-config\") pod \"machine-approver-56656f9798-wn59p\" (UID: \"d1c0a9ad-6310-4b36-82cf-775aad2a3232\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wn59p" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.501148 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-zbwhx"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.501990 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-zbwhx" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.501169 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f4befc0-7e37-4e54-ba59-0e4f698980b6-config\") pod \"etcd-operator-b45778765-x8g9t\" (UID: \"7f4befc0-7e37-4e54-ba59-0e4f698980b6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x8g9t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.503524 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-config\") pod \"route-controller-manager-6576b87f9c-fv2tb\" (UID: \"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.503614 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.503722 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-46bkn"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.503905 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.504070 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.504129 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d1c0a9ad-6310-4b36-82cf-775aad2a3232-auth-proxy-config\") pod \"machine-approver-56656f9798-wn59p\" (UID: \"d1c0a9ad-6310-4b36-82cf-775aad2a3232\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wn59p" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.504155 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a15c6a7-1b90-42e1-8e1d-ccdba481e6db-config\") pod \"console-operator-58897d9998-fx2d9\" (UID: \"3a15c6a7-1b90-42e1-8e1d-ccdba481e6db\") " pod="openshift-console-operator/console-operator-58897d9998-fx2d9" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.504223 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.504309 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.504367 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.504549 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.505002 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.505163 4857 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.505314 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.505698 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-serving-cert\") pod \"route-controller-manager-6576b87f9c-fv2tb\" (UID: \"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.505830 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.506061 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.506329 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.506388 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/d1c0a9ad-6310-4b36-82cf-775aad2a3232-machine-approver-tls\") pod \"machine-approver-56656f9798-wn59p\" (UID: \"d1c0a9ad-6310-4b36-82cf-775aad2a3232\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wn59p" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.506504 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.506576 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.506709 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.506861 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.506929 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.507009 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.507049 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.505000 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/7f4befc0-7e37-4e54-ba59-0e4f698980b6-etcd-ca\") pod \"etcd-operator-b45778765-x8g9t\" (UID: \"7f4befc0-7e37-4e54-ba59-0e4f698980b6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x8g9t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.507229 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 28 
13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.507284 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.507285 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.507375 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.507445 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.507458 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.507540 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.507572 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.507580 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.507699 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.511081 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9ldq"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.511554 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tnz7"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.511983 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-rl4qj"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.512361 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.512587 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-w9chp"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.512729 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9ldq" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.512982 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-46bkn" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.513950 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tnz7" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.514687 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rl4qj" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.515515 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fvj5x" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.515729 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.517077 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.518047 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7f4befc0-7e37-4e54-ba59-0e4f698980b6-serving-cert\") pod \"etcd-operator-b45778765-x8g9t\" (UID: \"7f4befc0-7e37-4e54-ba59-0e4f698980b6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x8g9t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.520617 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.521133 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/7f4befc0-7e37-4e54-ba59-0e4f698980b6-etcd-client\") pod \"etcd-operator-b45778765-x8g9t\" (UID: \"7f4befc0-7e37-4e54-ba59-0e4f698980b6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x8g9t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.521995 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-8jrs2"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.522856 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fqxp6"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.523360 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-hvs7l"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.524927 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rvh62"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.526254 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-slgpm"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.531382 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.531503 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405595-dbc4d"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.532842 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-w9chp" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.533004 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8jrs2" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.533301 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fqxp6" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.533400 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-hvs7l" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.533555 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rvh62" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.533692 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-slgpm" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.534734 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.536908 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405595-dbc4d" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.541706 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wlfw4"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.542065 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.548281 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-5jcnx"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.549440 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.549509 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3a15c6a7-1b90-42e1-8e1d-ccdba481e6db-serving-cert\") pod \"console-operator-58897d9998-fx2d9\" (UID: \"3a15c6a7-1b90-42e1-8e1d-ccdba481e6db\") " pod="openshift-console-operator/console-operator-58897d9998-fx2d9" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.550247 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.550828 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wlfw4" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.553181 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.554328 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-fx2d9"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.566127 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.567208 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-9g9fd"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.568701 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-qr8zs"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.569406 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qr8zs" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.569668 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.578805 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zpqmp"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.578864 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pkcmd"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.581936 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-244qv"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.583823 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-7plbl"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.584165 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c544t"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.585491 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-hzw48"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.587375 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.588383 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-n485x"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.590004 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-t9k6q"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.592007 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-t448t"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.593665 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mfb8j"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.595125 4857 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-machine-config-operator/machine-config-server-vwm2l"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.595651 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-vwm2l" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.596367 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-tcbhr"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.596896 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-tcbhr" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.596910 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/7e845cce-5c0e-4fad-bd24-6ff321ea3c02-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-c544t\" (UID: \"7e845cce-5c0e-4fad-bd24-6ff321ea3c02\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c544t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.596942 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/15243bb2-b17a-4ad4-b4f9-fbb592883207-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-t9k6q\" (UID: \"15243bb2-b17a-4ad4-b4f9-fbb592883207\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t9k6q" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.596966 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/15243bb2-b17a-4ad4-b4f9-fbb592883207-serving-cert\") pod \"authentication-operator-69f744f599-t9k6q\" (UID: \"15243bb2-b17a-4ad4-b4f9-fbb592883207\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t9k6q" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.596990 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56bnk\" (UniqueName: \"kubernetes.io/projected/7e845cce-5c0e-4fad-bd24-6ff321ea3c02-kube-api-access-56bnk\") pod \"cluster-samples-operator-665b6dd947-c544t\" (UID: \"7e845cce-5c0e-4fad-bd24-6ff321ea3c02\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c544t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.597015 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shxrp\" (UniqueName: \"kubernetes.io/projected/15243bb2-b17a-4ad4-b4f9-fbb592883207-kube-api-access-shxrp\") pod \"authentication-operator-69f744f599-t9k6q\" (UID: \"15243bb2-b17a-4ad4-b4f9-fbb592883207\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t9k6q" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.597039 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/15243bb2-b17a-4ad4-b4f9-fbb592883207-service-ca-bundle\") pod \"authentication-operator-69f744f599-t9k6q\" (UID: \"15243bb2-b17a-4ad4-b4f9-fbb592883207\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t9k6q" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.597096 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/15243bb2-b17a-4ad4-b4f9-fbb592883207-config\") pod 
\"authentication-operator-69f744f599-t9k6q\" (UID: \"15243bb2-b17a-4ad4-b4f9-fbb592883207\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t9k6q" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.598066 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/15243bb2-b17a-4ad4-b4f9-fbb592883207-config\") pod \"authentication-operator-69f744f599-t9k6q\" (UID: \"15243bb2-b17a-4ad4-b4f9-fbb592883207\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t9k6q" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.598699 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-8jrs2"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.599282 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/15243bb2-b17a-4ad4-b4f9-fbb592883207-service-ca-bundle\") pod \"authentication-operator-69f744f599-t9k6q\" (UID: \"15243bb2-b17a-4ad4-b4f9-fbb592883207\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t9k6q" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.601094 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/7e845cce-5c0e-4fad-bd24-6ff321ea3c02-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-c544t\" (UID: \"7e845cce-5c0e-4fad-bd24-6ff321ea3c02\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c544t" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.601613 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-46bkn"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.602901 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-x8g9t"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.604870 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77ck4"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.605914 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.605922 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-5jcnx"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.606904 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-2xzcp"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.608402 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-tz5lk"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.609198 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-rl4qj"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.609957 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9ldq"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.611001 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.612164 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-fvj5x"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.613181 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-9g9fd"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.614047 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/15243bb2-b17a-4ad4-b4f9-fbb592883207-serving-cert\") pod \"authentication-operator-69f744f599-t9k6q\" (UID: \"15243bb2-b17a-4ad4-b4f9-fbb592883207\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t9k6q" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.614389 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-gqtr2"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.615286 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-hvs7l"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.616516 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-pxdz7"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.617346 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.618349 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-tcbhr"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.619374 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tnz7"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.620574 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-w9chp"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.621477 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fqxp6"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.622544 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wlfw4"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.623656 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-slgpm"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.624719 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-qr8zs"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.625712 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405595-dbc4d"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.626204 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.627221 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rvh62"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.627831 4857 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-ddcsr"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.630084 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-ddcsr" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.638574 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-ddcsr"] Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.666189 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.686660 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.706600 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.726031 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.746413 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.766658 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.786776 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.809703 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.854672 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.858334 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/15243bb2-b17a-4ad4-b4f9-fbb592883207-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-t9k6q\" (UID: \"15243bb2-b17a-4ad4-b4f9-fbb592883207\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t9k6q" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.866012 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.886951 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.906609 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.926566 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.966352 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g2fdf\" (UniqueName: \"kubernetes.io/projected/3a15c6a7-1b90-42e1-8e1d-ccdba481e6db-kube-api-access-g2fdf\") pod \"console-operator-58897d9998-fx2d9\" (UID: 
\"3a15c6a7-1b90-42e1-8e1d-ccdba481e6db\") " pod="openshift-console-operator/console-operator-58897d9998-fx2d9" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.984712 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6pkhr\" (UniqueName: \"kubernetes.io/projected/de8cf09d-8247-4f1f-bce9-01472e9ee181-kube-api-access-6pkhr\") pod \"downloads-7954f5f757-rc8cq\" (UID: \"de8cf09d-8247-4f1f-bce9-01472e9ee181\") " pod="openshift-console/downloads-7954f5f757-rc8cq" Nov 28 13:20:44 crc kubenswrapper[4857]: I1128 13:20:44.986370 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.024255 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrl5j\" (UniqueName: \"kubernetes.io/projected/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-kube-api-access-rrl5j\") pod \"route-controller-manager-6576b87f9c-fv2tb\" (UID: \"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.028820 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-rc8cq" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.038824 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czf6g\" (UniqueName: \"kubernetes.io/projected/7f4befc0-7e37-4e54-ba59-0e4f698980b6-kube-api-access-czf6g\") pod \"etcd-operator-b45778765-x8g9t\" (UID: \"7f4befc0-7e37-4e54-ba59-0e4f698980b6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x8g9t" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.046503 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.065875 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.086542 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.090899 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-x8g9t" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.126599 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.128669 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tlbl5\" (UniqueName: \"kubernetes.io/projected/d1c0a9ad-6310-4b36-82cf-775aad2a3232-kube-api-access-tlbl5\") pod \"machine-approver-56656f9798-wn59p\" (UID: \"d1c0a9ad-6310-4b36-82cf-775aad2a3232\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wn59p" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.133371 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-fx2d9" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.147252 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.166136 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.186932 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.206460 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-rc8cq"] Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.207086 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.226221 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.273848 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.274383 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.280014 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.285770 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.298417 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-x8g9t"] Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.305780 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 28 13:20:45 crc kubenswrapper[4857]: W1128 13:20:45.305961 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7f4befc0_7e37_4e54_ba59_0e4f698980b6.slice/crio-d36d92dfcf683bfefb7420443afed88fcbf6f181e619cdb9384b2b18cff224f0 WatchSource:0}: Error finding container d36d92dfcf683bfefb7420443afed88fcbf6f181e619cdb9384b2b18cff224f0: Status 404 returned error can't find the container with id d36d92dfcf683bfefb7420443afed88fcbf6f181e619cdb9384b2b18cff224f0 Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.325822 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.347163 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.361430 4857 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-fx2d9"] Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.364720 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wn59p" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.366426 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 28 13:20:45 crc kubenswrapper[4857]: W1128 13:20:45.369387 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3a15c6a7_1b90_42e1_8e1d_ccdba481e6db.slice/crio-85f6f550d00a12372d9e397fc309f441d414d9be8b0c604f896f3d39f3ebba15 WatchSource:0}: Error finding container 85f6f550d00a12372d9e397fc309f441d414d9be8b0c604f896f3d39f3ebba15: Status 404 returned error can't find the container with id 85f6f550d00a12372d9e397fc309f441d414d9be8b0c604f896f3d39f3ebba15 Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.386331 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.406466 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.426696 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb"] Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.427181 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.447303 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.466714 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.485718 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.506210 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.524606 4857 request.go:700] Waited for 1.009511103s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-operator/configmaps?fieldSelector=metadata.name%3Dtrusted-ca&limit=500&resourceVersion=0 Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.533479 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.545566 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.566146 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.586095 4857 reflector.go:368] 
Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.606854 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.626630 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.646355 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.666454 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.685674 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.707574 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.726699 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.747118 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.767495 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.787075 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.807265 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.827153 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.846580 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.866309 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.886667 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.906596 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.927167 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.947185 4857 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-service-ca"/"signing-key" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.966520 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 28 13:20:45 crc kubenswrapper[4857]: I1128 13:20:45.986999 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.007455 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.027144 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.043810 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-x8g9t" event={"ID":"7f4befc0-7e37-4e54-ba59-0e4f698980b6","Type":"ContainerStarted","Data":"d36d92dfcf683bfefb7420443afed88fcbf6f181e619cdb9384b2b18cff224f0"} Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.046500 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wn59p" event={"ID":"d1c0a9ad-6310-4b36-82cf-775aad2a3232","Type":"ContainerStarted","Data":"fb36eac8f3da5fcf6c0fc201e25741afa2a0de5b457911f4b384f20d234e9d2c"} Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.046679 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.047545 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-rc8cq" event={"ID":"de8cf09d-8247-4f1f-bce9-01472e9ee181","Type":"ContainerStarted","Data":"f1a1069247f2ab66437ada7654016f9fa43558cc087b1a9e7dcecaf2ac475452"} Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.048955 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-fx2d9" event={"ID":"3a15c6a7-1b90-42e1-8e1d-ccdba481e6db","Type":"ContainerStarted","Data":"85f6f550d00a12372d9e397fc309f441d414d9be8b0c604f896f3d39f3ebba15"} Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.066924 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.085949 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.106215 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.126929 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.151692 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.176176 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.186297 4857 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.206170 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.226844 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.247370 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.266575 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.286788 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.306987 4857 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.327075 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.346490 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.366976 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.386355 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.405978 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.426675 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.465967 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.502867 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-56bnk\" (UniqueName: \"kubernetes.io/projected/7e845cce-5c0e-4fad-bd24-6ff321ea3c02-kube-api-access-56bnk\") pod \"cluster-samples-operator-665b6dd947-c544t\" (UID: \"7e845cce-5c0e-4fad-bd24-6ff321ea3c02\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c544t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.505724 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.525047 4857 request.go:700] Waited for 1.894624184s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-canary/secrets?fieldSelector=metadata.name%3Dcanary-serving-cert&limit=500&resourceVersion=0 Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.532327 4857 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.547581 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.566463 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.717934 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c544t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.719090 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wp69c\" (UniqueName: \"kubernetes.io/projected/e846921b-6840-420c-a782-fba505744883-kube-api-access-wp69c\") pod \"openshift-config-operator-7777fb866f-fvj5x\" (UID: \"e846921b-6840-420c-a782-fba505744883\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fvj5x" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.719200 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-console-serving-cert\") pod \"console-f9d7485db-7plbl\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") " pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.719254 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-service-ca\") pod \"console-f9d7485db-7plbl\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") " pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.719331 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/e846921b-6840-420c-a782-fba505744883-available-featuregates\") pod \"openshift-config-operator-7777fb866f-fvj5x\" (UID: \"e846921b-6840-420c-a782-fba505744883\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fvj5x" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.719453 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/eba66557-699a-4be9-bc8e-fcedf6155f7e-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-pxdz7\" (UID: \"eba66557-699a-4be9-bc8e-fcedf6155f7e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pxdz7" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.719598 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e22d80c8-5706-475b-a385-00c22ad2eaea-client-ca\") pod \"controller-manager-879f6c89f-zpqmp\" (UID: \"e22d80c8-5706-475b-a385-00c22ad2eaea\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.719638 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b485r\" (UniqueName: 
\"kubernetes.io/projected/e22d80c8-5706-475b-a385-00c22ad2eaea-kube-api-access-b485r\") pod \"controller-manager-879f6c89f-zpqmp\" (UID: \"e22d80c8-5706-475b-a385-00c22ad2eaea\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.719805 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:46 crc kubenswrapper[4857]: E1128 13:20:46.720288 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:47.220269381 +0000 UTC m=+139.247644568 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.720354 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/eba66557-699a-4be9-bc8e-fcedf6155f7e-images\") pod \"machine-api-operator-5694c8668f-pxdz7\" (UID: \"eba66557-699a-4be9-bc8e-fcedf6155f7e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pxdz7" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.720431 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e22d80c8-5706-475b-a385-00c22ad2eaea-serving-cert\") pod \"controller-manager-879f6c89f-zpqmp\" (UID: \"e22d80c8-5706-475b-a385-00c22ad2eaea\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.720497 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e22d80c8-5706-475b-a385-00c22ad2eaea-config\") pod \"controller-manager-879f6c89f-zpqmp\" (UID: \"e22d80c8-5706-475b-a385-00c22ad2eaea\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.720557 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-console-oauth-config\") pod \"console-f9d7485db-7plbl\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") " pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.720842 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5c497e5f-f362-48a4-bf34-833bfdc6de1b-ca-trust-extracted\") pod \"image-registry-697d97f7c8-t448t\" (UID: 
\"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.720911 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5c497e5f-f362-48a4-bf34-833bfdc6de1b-installation-pull-secrets\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.721019 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5c497e5f-f362-48a4-bf34-833bfdc6de1b-trusted-ca\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.721069 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hr869\" (UniqueName: \"kubernetes.io/projected/eba66557-699a-4be9-bc8e-fcedf6155f7e-kube-api-access-hr869\") pod \"machine-api-operator-5694c8668f-pxdz7\" (UID: \"eba66557-699a-4be9-bc8e-fcedf6155f7e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pxdz7" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.721146 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e846921b-6840-420c-a782-fba505744883-serving-cert\") pod \"openshift-config-operator-7777fb866f-fvj5x\" (UID: \"e846921b-6840-420c-a782-fba505744883\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fvj5x" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.721213 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eba66557-699a-4be9-bc8e-fcedf6155f7e-config\") pod \"machine-api-operator-5694c8668f-pxdz7\" (UID: \"eba66557-699a-4be9-bc8e-fcedf6155f7e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pxdz7" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.721483 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f64gl\" (UniqueName: \"kubernetes.io/projected/5c497e5f-f362-48a4-bf34-833bfdc6de1b-kube-api-access-f64gl\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.721557 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5c497e5f-f362-48a4-bf34-833bfdc6de1b-registry-tls\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.721608 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5c497e5f-f362-48a4-bf34-833bfdc6de1b-bound-sa-token\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.721654 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnt67\" (UniqueName: \"kubernetes.io/projected/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-kube-api-access-mnt67\") pod \"console-f9d7485db-7plbl\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") " pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.721705 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e22d80c8-5706-475b-a385-00c22ad2eaea-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-zpqmp\" (UID: \"e22d80c8-5706-475b-a385-00c22ad2eaea\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.721865 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5c497e5f-f362-48a4-bf34-833bfdc6de1b-registry-certificates\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.721917 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-console-config\") pod \"console-f9d7485db-7plbl\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") " pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.721964 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-trusted-ca-bundle\") pod \"console-f9d7485db-7plbl\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") " pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.722012 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-oauth-serving-cert\") pod \"console-f9d7485db-7plbl\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") " pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.739450 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shxrp\" (UniqueName: \"kubernetes.io/projected/15243bb2-b17a-4ad4-b4f9-fbb592883207-kube-api-access-shxrp\") pod \"authentication-operator-69f744f599-t9k6q\" (UID: \"15243bb2-b17a-4ad4-b4f9-fbb592883207\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t9k6q" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.823314 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.823661 4857 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-wp69c\" (UniqueName: \"kubernetes.io/projected/e846921b-6840-420c-a782-fba505744883-kube-api-access-wp69c\") pod \"openshift-config-operator-7777fb866f-fvj5x\" (UID: \"e846921b-6840-420c-a782-fba505744883\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fvj5x" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.823732 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/18a1ac68-146b-4c80-a763-df4b75e6698d-stats-auth\") pod \"router-default-5444994796-zbwhx\" (UID: \"18a1ac68-146b-4c80-a763-df4b75e6698d\") " pod="openshift-ingress/router-default-5444994796-zbwhx" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.823785 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c889x\" (UniqueName: \"kubernetes.io/projected/b008b6be-e0fb-4486-9543-1343d458badd-kube-api-access-c889x\") pod \"dns-operator-744455d44c-gqtr2\" (UID: \"b008b6be-e0fb-4486-9543-1343d458badd\") " pod="openshift-dns-operator/dns-operator-744455d44c-gqtr2" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.823807 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6d834626-adda-4075-9196-bbf271b7b785-encryption-config\") pod \"apiserver-7bbb656c7d-j8zwm\" (UID: \"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.823825 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2240981f-6726-4fd9-b158-eae175371451-srv-cert\") pod \"olm-operator-6b444d44fb-8tnz7\" (UID: \"2240981f-6726-4fd9-b158-eae175371451\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tnz7" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.823855 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.823878 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7wslz\" (UniqueName: \"kubernetes.io/projected/03986f47-8037-41dd-a995-684a9296a676-kube-api-access-7wslz\") pod \"csi-hostpathplugin-9g9fd\" (UID: \"03986f47-8037-41dd-a995-684a9296a676\") " pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.823898 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/18a1ac68-146b-4c80-a763-df4b75e6698d-default-certificate\") pod \"router-default-5444994796-zbwhx\" (UID: \"18a1ac68-146b-4c80-a763-df4b75e6698d\") " pod="openshift-ingress/router-default-5444994796-zbwhx" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.823929 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8f67g\" 
(UniqueName: \"kubernetes.io/projected/70c93db6-6e53-4870-92f3-e6335deb6936-kube-api-access-8f67g\") pod \"dns-default-tcbhr\" (UID: \"70c93db6-6e53-4870-92f3-e6335deb6936\") " pod="openshift-dns/dns-default-tcbhr" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.823952 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.823972 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/40c7002a-571a-4c01-bfb7-a6bbf316a615-etcd-client\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.823995 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee-webhook-cert\") pod \"packageserver-d55dfcdfc-fm4ct\" (UID: \"a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824035 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/e846921b-6840-420c-a782-fba505744883-available-featuregates\") pod \"openshift-config-operator-7777fb866f-fvj5x\" (UID: \"e846921b-6840-420c-a782-fba505744883\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fvj5x" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824062 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/40c7002a-571a-4c01-bfb7-a6bbf316a615-audit-dir\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824084 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee-apiservice-cert\") pod \"packageserver-d55dfcdfc-fm4ct\" (UID: \"a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824106 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/faa6eb3a-4dd5-4e99-83da-fdd167db88e7-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-tz5lk\" (UID: \"faa6eb3a-4dd5-4e99-83da-fdd167db88e7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-tz5lk" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824129 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/eba66557-699a-4be9-bc8e-fcedf6155f7e-machine-api-operator-tls\") pod 
\"machine-api-operator-5694c8668f-pxdz7\" (UID: \"eba66557-699a-4be9-bc8e-fcedf6155f7e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pxdz7" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824151 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wcsn\" (UniqueName: \"kubernetes.io/projected/75c61d4f-c7df-4f0f-b643-6bde1458075a-kube-api-access-6wcsn\") pod \"control-plane-machine-set-operator-78cbb6b69f-mfb8j\" (UID: \"75c61d4f-c7df-4f0f-b643-6bde1458075a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mfb8j" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824172 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/03986f47-8037-41dd-a995-684a9296a676-csi-data-dir\") pod \"csi-hostpathplugin-9g9fd\" (UID: \"03986f47-8037-41dd-a995-684a9296a676\") " pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824225 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/95c3adf6-7fac-411c-9f54-9eea69052b94-node-bootstrap-token\") pod \"machine-config-server-vwm2l\" (UID: \"95c3adf6-7fac-411c-9f54-9eea69052b94\") " pod="openshift-machine-config-operator/machine-config-server-vwm2l" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824286 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8cz4z\" (UniqueName: \"kubernetes.io/projected/30934d71-ae7e-491a-933a-f1667b3608e4-kube-api-access-8cz4z\") pod \"marketplace-operator-79b997595-5jcnx\" (UID: \"30934d71-ae7e-491a-933a-f1667b3608e4\") " pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824310 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/faa6eb3a-4dd5-4e99-83da-fdd167db88e7-proxy-tls\") pod \"machine-config-controller-84d6567774-tz5lk\" (UID: \"faa6eb3a-4dd5-4e99-83da-fdd167db88e7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-tz5lk" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824330 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hknj5\" (UniqueName: \"kubernetes.io/projected/2240981f-6726-4fd9-b158-eae175371451-kube-api-access-hknj5\") pod \"olm-operator-6b444d44fb-8tnz7\" (UID: \"2240981f-6726-4fd9-b158-eae175371451\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tnz7" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824354 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4655590-7cc2-4489-8a43-d897b47bdd45-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-pkcmd\" (UID: \"e4655590-7cc2-4489-8a43-d897b47bdd45\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pkcmd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824395 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/fc3f1cee-b032-42c0-9996-8c1b815ad0f6-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-n485x\" (UID: \"fc3f1cee-b032-42c0-9996-8c1b815ad0f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-n485x" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824415 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b0ba2c12-2e58-47ae-af8f-3a877929fee7-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-fqxp6\" (UID: \"b0ba2c12-2e58-47ae-af8f-3a877929fee7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fqxp6" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824448 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/eba66557-699a-4be9-bc8e-fcedf6155f7e-images\") pod \"machine-api-operator-5694c8668f-pxdz7\" (UID: \"eba66557-699a-4be9-bc8e-fcedf6155f7e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pxdz7" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824471 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824491 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/70c93db6-6e53-4870-92f3-e6335deb6936-metrics-tls\") pod \"dns-default-tcbhr\" (UID: \"70c93db6-6e53-4870-92f3-e6335deb6936\") " pod="openshift-dns/dns-default-tcbhr" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824511 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-audit-policies\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824533 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee-tmpfs\") pod \"packageserver-d55dfcdfc-fm4ct\" (UID: \"a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824552 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/56f8ea27-0443-4e0a-9dd0-2755b61f49f9-srv-cert\") pod \"catalog-operator-68c6474976-rvh62\" (UID: \"56f8ea27-0443-4e0a-9dd0-2755b61f49f9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rvh62" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824578 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/de0c7377-d49e-4651-b681-cddd455fd280-webhook-certs\") pod 
\"multus-admission-controller-857f4d67dd-hvs7l\" (UID: \"de0c7377-d49e-4651-b681-cddd455fd280\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-hvs7l" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824599 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2010920b-32b8-4a16-9703-99191b3ac0ac-serving-cert\") pod \"service-ca-operator-777779d784-qr8zs\" (UID: \"2010920b-32b8-4a16-9703-99191b3ac0ac\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qr8zs" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824911 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e22d80c8-5706-475b-a385-00c22ad2eaea-config\") pod \"controller-manager-879f6c89f-zpqmp\" (UID: \"e22d80c8-5706-475b-a385-00c22ad2eaea\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824944 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ca56a139-a9ad-46bf-a094-435eef021799-trusted-ca\") pod \"ingress-operator-5b745b69d9-rl4qj\" (UID: \"ca56a139-a9ad-46bf-a094-435eef021799\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rl4qj" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824967 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhzgn\" (UniqueName: \"kubernetes.io/projected/ca56a139-a9ad-46bf-a094-435eef021799-kube-api-access-lhzgn\") pod \"ingress-operator-5b745b69d9-rl4qj\" (UID: \"ca56a139-a9ad-46bf-a094-435eef021799\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rl4qj" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.824990 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/30934d71-ae7e-491a-933a-f1667b3608e4-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-5jcnx\" (UID: \"30934d71-ae7e-491a-933a-f1667b3608e4\") " pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825017 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/40c7002a-571a-4c01-bfb7-a6bbf316a615-audit\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825036 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/40c7002a-571a-4c01-bfb7-a6bbf316a615-serving-cert\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825086 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zmdv\" (UniqueName: \"kubernetes.io/projected/0eafa688-6c78-44bc-93de-6e300a65a036-kube-api-access-2zmdv\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825107 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cd7d528d-143a-4093-a83a-510b9767a355-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-244qv\" (UID: \"cd7d528d-143a-4093-a83a-510b9767a355\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-244qv" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825143 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5c497e5f-f362-48a4-bf34-833bfdc6de1b-trusted-ca\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825167 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hr869\" (UniqueName: \"kubernetes.io/projected/eba66557-699a-4be9-bc8e-fcedf6155f7e-kube-api-access-hr869\") pod \"machine-api-operator-5694c8668f-pxdz7\" (UID: \"eba66557-699a-4be9-bc8e-fcedf6155f7e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pxdz7" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825192 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/40c7002a-571a-4c01-bfb7-a6bbf316a615-etcd-serving-ca\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825214 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wx2p\" (UniqueName: \"kubernetes.io/projected/40c7002a-571a-4c01-bfb7-a6bbf316a615-kube-api-access-6wx2p\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825238 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwsbb\" (UniqueName: \"kubernetes.io/projected/18a1ac68-146b-4c80-a763-df4b75e6698d-kube-api-access-mwsbb\") pod \"router-default-5444994796-zbwhx\" (UID: \"18a1ac68-146b-4c80-a763-df4b75e6698d\") " pod="openshift-ingress/router-default-5444994796-zbwhx" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825262 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/03986f47-8037-41dd-a995-684a9296a676-socket-dir\") pod \"csi-hostpathplugin-9g9fd\" (UID: \"03986f47-8037-41dd-a995-684a9296a676\") " pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825285 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bfcb44a8-0cab-4938-9f92-0fcc4227a662-auth-proxy-config\") pod \"machine-config-operator-74547568cd-8jrs2\" (UID: \"bfcb44a8-0cab-4938-9f92-0fcc4227a662\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8jrs2" Nov 28 13:20:46 crc 
kubenswrapper[4857]: I1128 13:20:46.825307 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ca56a139-a9ad-46bf-a094-435eef021799-bound-sa-token\") pod \"ingress-operator-5b745b69d9-rl4qj\" (UID: \"ca56a139-a9ad-46bf-a094-435eef021799\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rl4qj" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825329 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0ba2c12-2e58-47ae-af8f-3a877929fee7-config\") pod \"kube-apiserver-operator-766d6c64bb-fqxp6\" (UID: \"b0ba2c12-2e58-47ae-af8f-3a877929fee7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fqxp6" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825352 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/219e3c3b-f767-461c-8573-8c2d8da52328-signing-cabundle\") pod \"service-ca-9c57cc56f-slgpm\" (UID: \"219e3c3b-f767-461c-8573-8c2d8da52328\") " pod="openshift-service-ca/service-ca-9c57cc56f-slgpm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825390 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/40c7002a-571a-4c01-bfb7-a6bbf316a615-node-pullsecrets\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825413 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b008b6be-e0fb-4486-9543-1343d458badd-metrics-tls\") pod \"dns-operator-744455d44c-gqtr2\" (UID: \"b008b6be-e0fb-4486-9543-1343d458badd\") " pod="openshift-dns-operator/dns-operator-744455d44c-gqtr2" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825585 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6f2s7\" (UniqueName: \"kubernetes.io/projected/6d834626-adda-4075-9196-bbf271b7b785-kube-api-access-6f2s7\") pod \"apiserver-7bbb656c7d-j8zwm\" (UID: \"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825617 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825642 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qd2h\" (UniqueName: \"kubernetes.io/projected/a0ce2b1a-313b-400a-b4e4-2b4b32b1785a-kube-api-access-2qd2h\") pod \"migrator-59844c95c7-w9chp\" (UID: \"a0ce2b1a-313b-400a-b4e4-2b4b32b1785a\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-w9chp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825665 4857 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rswrc\" (UniqueName: \"kubernetes.io/projected/de0c7377-d49e-4651-b681-cddd455fd280-kube-api-access-rswrc\") pod \"multus-admission-controller-857f4d67dd-hvs7l\" (UID: \"de0c7377-d49e-4651-b681-cddd455fd280\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-hvs7l" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825689 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6d834626-adda-4075-9196-bbf271b7b785-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-j8zwm\" (UID: \"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825716 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f64gl\" (UniqueName: \"kubernetes.io/projected/5c497e5f-f362-48a4-bf34-833bfdc6de1b-kube-api-access-f64gl\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825738 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6d834626-adda-4075-9196-bbf271b7b785-etcd-client\") pod \"apiserver-7bbb656c7d-j8zwm\" (UID: \"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825777 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/219e3c3b-f767-461c-8573-8c2d8da52328-signing-key\") pod \"service-ca-9c57cc56f-slgpm\" (UID: \"219e3c3b-f767-461c-8573-8c2d8da52328\") " pod="openshift-service-ca/service-ca-9c57cc56f-slgpm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825804 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmwcv\" (UniqueName: \"kubernetes.io/projected/f2860ffb-7c3e-488f-af31-bfb8609a67d4-kube-api-access-qmwcv\") pod \"collect-profiles-29405595-dbc4d\" (UID: \"f2860ffb-7c3e-488f-af31-bfb8609a67d4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405595-dbc4d" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825826 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fq5hl\" (UniqueName: \"kubernetes.io/projected/2010920b-32b8-4a16-9703-99191b3ac0ac-kube-api-access-fq5hl\") pod \"service-ca-operator-777779d784-qr8zs\" (UID: \"2010920b-32b8-4a16-9703-99191b3ac0ac\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qr8zs" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825851 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jkv84\" (UniqueName: \"kubernetes.io/projected/98125640-3946-46e2-aa2b-c70d9a04a8a1-kube-api-access-jkv84\") pod \"kube-storage-version-migrator-operator-b67b599dd-j9ldq\" (UID: \"98125640-3946-46e2-aa2b-c70d9a04a8a1\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9ldq" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825875 4857 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dph7p\" (UniqueName: \"kubernetes.io/projected/219e3c3b-f767-461c-8573-8c2d8da52328-kube-api-access-dph7p\") pod \"service-ca-9c57cc56f-slgpm\" (UID: \"219e3c3b-f767-461c-8573-8c2d8da52328\") " pod="openshift-service-ca/service-ca-9c57cc56f-slgpm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825902 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c493f67-7a6d-4685-afaf-be33fa220751-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-77ck4\" (UID: \"7c493f67-7a6d-4685-afaf-be33fa220751\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77ck4" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825923 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6d834626-adda-4075-9196-bbf271b7b785-audit-policies\") pod \"apiserver-7bbb656c7d-j8zwm\" (UID: \"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825945 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/ca56a139-a9ad-46bf-a094-435eef021799-metrics-tls\") pod \"ingress-operator-5b745b69d9-rl4qj\" (UID: \"ca56a139-a9ad-46bf-a094-435eef021799\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rl4qj" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825973 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5c497e5f-f362-48a4-bf34-833bfdc6de1b-bound-sa-token\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.825997 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vw8wb\" (UniqueName: \"kubernetes.io/projected/b2810366-b7c9-44b1-8b46-afba9c91937f-kube-api-access-vw8wb\") pod \"ingress-canary-ddcsr\" (UID: \"b2810366-b7c9-44b1-8b46-afba9c91937f\") " pod="openshift-ingress-canary/ingress-canary-ddcsr" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826038 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-trusted-ca-bundle\") pod \"console-f9d7485db-7plbl\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") " pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826063 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-oauth-serving-cert\") pod \"console-f9d7485db-7plbl\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") " pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826100 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: 
\"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826122 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/40c7002a-571a-4c01-bfb7-a6bbf316a615-encryption-config\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826145 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/bfcb44a8-0cab-4938-9f92-0fcc4227a662-images\") pod \"machine-config-operator-74547568cd-8jrs2\" (UID: \"bfcb44a8-0cab-4938-9f92-0fcc4227a662\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8jrs2" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826172 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826194 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-966xg\" (UniqueName: \"kubernetes.io/projected/95c3adf6-7fac-411c-9f54-9eea69052b94-kube-api-access-966xg\") pod \"machine-config-server-vwm2l\" (UID: \"95c3adf6-7fac-411c-9f54-9eea69052b94\") " pod="openshift-machine-config-operator/machine-config-server-vwm2l" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826228 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cd7d528d-143a-4093-a83a-510b9767a355-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-244qv\" (UID: \"cd7d528d-143a-4093-a83a-510b9767a355\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-244qv" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826249 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6b4q\" (UniqueName: \"kubernetes.io/projected/fc3f1cee-b032-42c0-9996-8c1b815ad0f6-kube-api-access-f6b4q\") pod \"cluster-image-registry-operator-dc59b4c8b-n485x\" (UID: \"fc3f1cee-b032-42c0-9996-8c1b815ad0f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-n485x" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826268 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hn7rn\" (UniqueName: \"kubernetes.io/projected/56f8ea27-0443-4e0a-9dd0-2755b61f49f9-kube-api-access-hn7rn\") pod \"catalog-operator-68c6474976-rvh62\" (UID: \"56f8ea27-0443-4e0a-9dd0-2755b61f49f9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rvh62" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826290 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-service-ca\") pod \"console-f9d7485db-7plbl\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") " pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826311 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4bab5b07-9cba-4b9f-9c8d-ce540f57347f-config\") pod \"kube-controller-manager-operator-78b949d7b-46bkn\" (UID: \"4bab5b07-9cba-4b9f-9c8d-ce540f57347f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-46bkn" Nov 28 13:20:46 crc kubenswrapper[4857]: E1128 13:20:46.826361 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:47.326346948 +0000 UTC m=+139.353722115 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826387 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-console-serving-cert\") pod \"console-f9d7485db-7plbl\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") " pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826409 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/40c7002a-571a-4c01-bfb7-a6bbf316a615-trusted-ca-bundle\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826431 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f2860ffb-7c3e-488f-af31-bfb8609a67d4-config-volume\") pod \"collect-profiles-29405595-dbc4d\" (UID: \"f2860ffb-7c3e-488f-af31-bfb8609a67d4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405595-dbc4d" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826453 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/18a1ac68-146b-4c80-a763-df4b75e6698d-service-ca-bundle\") pod \"router-default-5444994796-zbwhx\" (UID: \"18a1ac68-146b-4c80-a763-df4b75e6698d\") " pod="openshift-ingress/router-default-5444994796-zbwhx" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826475 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40c7002a-571a-4c01-bfb7-a6bbf316a615-config\") pod \"apiserver-76f77b778f-2xzcp\" (UID: 
\"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826485 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/eba66557-699a-4be9-bc8e-fcedf6155f7e-images\") pod \"machine-api-operator-5694c8668f-pxdz7\" (UID: \"eba66557-699a-4be9-bc8e-fcedf6155f7e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pxdz7" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826494 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/03986f47-8037-41dd-a995-684a9296a676-mountpoint-dir\") pod \"csi-hostpathplugin-9g9fd\" (UID: \"03986f47-8037-41dd-a995-684a9296a676\") " pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826516 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6d834626-adda-4075-9196-bbf271b7b785-audit-dir\") pod \"apiserver-7bbb656c7d-j8zwm\" (UID: \"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826535 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fc3f1cee-b032-42c0-9996-8c1b815ad0f6-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-n485x\" (UID: \"fc3f1cee-b032-42c0-9996-8c1b815ad0f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-n485x" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826555 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/bfcb44a8-0cab-4938-9f92-0fcc4227a662-proxy-tls\") pod \"machine-config-operator-74547568cd-8jrs2\" (UID: \"bfcb44a8-0cab-4938-9f92-0fcc4227a662\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8jrs2" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826577 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826602 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826618 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sdsd5\" (UniqueName: \"kubernetes.io/projected/a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee-kube-api-access-sdsd5\") pod \"packageserver-d55dfcdfc-fm4ct\" (UID: \"a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee\") " 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826633 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2010920b-32b8-4a16-9703-99191b3ac0ac-config\") pod \"service-ca-operator-777779d784-qr8zs\" (UID: \"2010920b-32b8-4a16-9703-99191b3ac0ac\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qr8zs" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826667 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e22d80c8-5706-475b-a385-00c22ad2eaea-client-ca\") pod \"controller-manager-879f6c89f-zpqmp\" (UID: \"e22d80c8-5706-475b-a385-00c22ad2eaea\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826689 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826710 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfbdb\" (UniqueName: \"kubernetes.io/projected/7c493f67-7a6d-4685-afaf-be33fa220751-kube-api-access-vfbdb\") pod \"openshift-apiserver-operator-796bbdcf4f-77ck4\" (UID: \"7c493f67-7a6d-4685-afaf-be33fa220751\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77ck4" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826735 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b485r\" (UniqueName: \"kubernetes.io/projected/e22d80c8-5706-475b-a385-00c22ad2eaea-kube-api-access-b485r\") pod \"controller-manager-879f6c89f-zpqmp\" (UID: \"e22d80c8-5706-475b-a385-00c22ad2eaea\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826776 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/56f8ea27-0443-4e0a-9dd0-2755b61f49f9-profile-collector-cert\") pod \"catalog-operator-68c6474976-rvh62\" (UID: \"56f8ea27-0443-4e0a-9dd0-2755b61f49f9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rvh62" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826802 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2240981f-6726-4fd9-b158-eae175371451-profile-collector-cert\") pod \"olm-operator-6b444d44fb-8tnz7\" (UID: \"2240981f-6726-4fd9-b158-eae175371451\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tnz7" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826825 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/98125640-3946-46e2-aa2b-c70d9a04a8a1-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-j9ldq\" (UID: 
\"98125640-3946-46e2-aa2b-c70d9a04a8a1\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9ldq" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826858 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826881 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/30934d71-ae7e-491a-933a-f1667b3608e4-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-5jcnx\" (UID: \"30934d71-ae7e-491a-933a-f1667b3608e4\") " pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826904 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e22d80c8-5706-475b-a385-00c22ad2eaea-serving-cert\") pod \"controller-manager-879f6c89f-zpqmp\" (UID: \"e22d80c8-5706-475b-a385-00c22ad2eaea\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826929 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6d834626-adda-4075-9196-bbf271b7b785-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-j8zwm\" (UID: \"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826951 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/18a1ac68-146b-4c80-a763-df4b75e6698d-metrics-certs\") pod \"router-default-5444994796-zbwhx\" (UID: \"18a1ac68-146b-4c80-a763-df4b75e6698d\") " pod="openshift-ingress/router-default-5444994796-zbwhx" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.826974 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4bab5b07-9cba-4b9f-9c8d-ce540f57347f-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-46bkn\" (UID: \"4bab5b07-9cba-4b9f-9c8d-ce540f57347f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-46bkn" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.827011 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.827035 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/fc3f1cee-b032-42c0-9996-8c1b815ad0f6-image-registry-operator-tls\") 
pod \"cluster-image-registry-operator-dc59b4c8b-n485x\" (UID: \"fc3f1cee-b032-42c0-9996-8c1b815ad0f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-n485x" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.827059 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/95c3adf6-7fac-411c-9f54-9eea69052b94-certs\") pod \"machine-config-server-vwm2l\" (UID: \"95c3adf6-7fac-411c-9f54-9eea69052b94\") " pod="openshift-machine-config-operator/machine-config-server-vwm2l" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.827097 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-console-oauth-config\") pod \"console-f9d7485db-7plbl\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") " pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.827140 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98125640-3946-46e2-aa2b-c70d9a04a8a1-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-j9ldq\" (UID: \"98125640-3946-46e2-aa2b-c70d9a04a8a1\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9ldq" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.827167 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5c497e5f-f362-48a4-bf34-833bfdc6de1b-installation-pull-secrets\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.827189 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e4655590-7cc2-4489-8a43-d897b47bdd45-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-pkcmd\" (UID: \"e4655590-7cc2-4489-8a43-d897b47bdd45\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pkcmd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.827217 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5c497e5f-f362-48a4-bf34-833bfdc6de1b-ca-trust-extracted\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.827255 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/03986f47-8037-41dd-a995-684a9296a676-plugins-dir\") pod \"csi-hostpathplugin-9g9fd\" (UID: \"03986f47-8037-41dd-a995-684a9296a676\") " pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.827280 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbkjx\" (UniqueName: \"kubernetes.io/projected/faa6eb3a-4dd5-4e99-83da-fdd167db88e7-kube-api-access-qbkjx\") pod \"machine-config-controller-84d6567774-tz5lk\" (UID: 
\"faa6eb3a-4dd5-4e99-83da-fdd167db88e7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-tz5lk" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.827304 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b0ba2c12-2e58-47ae-af8f-3a877929fee7-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-fqxp6\" (UID: \"b0ba2c12-2e58-47ae-af8f-3a877929fee7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fqxp6" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.827329 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e846921b-6840-420c-a782-fba505744883-serving-cert\") pod \"openshift-config-operator-7777fb866f-fvj5x\" (UID: \"e846921b-6840-420c-a782-fba505744883\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fvj5x" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.827353 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d834626-adda-4075-9196-bbf271b7b785-serving-cert\") pod \"apiserver-7bbb656c7d-j8zwm\" (UID: \"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.827377 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eba66557-699a-4be9-bc8e-fcedf6155f7e-config\") pod \"machine-api-operator-5694c8668f-pxdz7\" (UID: \"eba66557-699a-4be9-bc8e-fcedf6155f7e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pxdz7" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.828381 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-service-ca\") pod \"console-f9d7485db-7plbl\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") " pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.828439 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/e846921b-6840-420c-a782-fba505744883-available-featuregates\") pod \"openshift-config-operator-7777fb866f-fvj5x\" (UID: \"e846921b-6840-420c-a782-fba505744883\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fvj5x" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.830146 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/70c93db6-6e53-4870-92f3-e6335deb6936-config-volume\") pod \"dns-default-tcbhr\" (UID: \"70c93db6-6e53-4870-92f3-e6335deb6936\") " pod="openshift-dns/dns-default-tcbhr" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.830455 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 
13:20:46.830486 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b2810366-b7c9-44b1-8b46-afba9c91937f-cert\") pod \"ingress-canary-ddcsr\" (UID: \"b2810366-b7c9-44b1-8b46-afba9c91937f\") " pod="openshift-ingress-canary/ingress-canary-ddcsr" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.830510 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/75c61d4f-c7df-4f0f-b643-6bde1458075a-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-mfb8j\" (UID: \"75c61d4f-c7df-4f0f-b643-6bde1458075a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mfb8j" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.830536 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/03986f47-8037-41dd-a995-684a9296a676-registration-dir\") pod \"csi-hostpathplugin-9g9fd\" (UID: \"03986f47-8037-41dd-a995-684a9296a676\") " pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.830560 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4bab5b07-9cba-4b9f-9c8d-ce540f57347f-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-46bkn\" (UID: \"4bab5b07-9cba-4b9f-9c8d-ce540f57347f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-46bkn" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.830583 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/40c7002a-571a-4c01-bfb7-a6bbf316a615-image-import-ca\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.830610 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/916dbd05-6649-4ef0-9fdf-b2abe4ee3193-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-wlfw4\" (UID: \"916dbd05-6649-4ef0-9fdf-b2abe4ee3193\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wlfw4" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.830634 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cd7d528d-143a-4093-a83a-510b9767a355-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-244qv\" (UID: \"cd7d528d-143a-4093-a83a-510b9767a355\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-244qv" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.830656 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgrmg\" (UniqueName: \"kubernetes.io/projected/e4655590-7cc2-4489-8a43-d897b47bdd45-kube-api-access-cgrmg\") pod \"openshift-controller-manager-operator-756b6f6bc6-pkcmd\" (UID: \"e4655590-7cc2-4489-8a43-d897b47bdd45\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pkcmd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.830677 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c493f67-7a6d-4685-afaf-be33fa220751-config\") pod \"openshift-apiserver-operator-796bbdcf4f-77ck4\" (UID: \"7c493f67-7a6d-4685-afaf-be33fa220751\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77ck4" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.830699 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f2860ffb-7c3e-488f-af31-bfb8609a67d4-secret-volume\") pod \"collect-profiles-29405595-dbc4d\" (UID: \"f2860ffb-7c3e-488f-af31-bfb8609a67d4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405595-dbc4d" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.830720 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rr9hh\" (UniqueName: \"kubernetes.io/projected/916dbd05-6649-4ef0-9fdf-b2abe4ee3193-kube-api-access-rr9hh\") pod \"package-server-manager-789f6589d5-wlfw4\" (UID: \"916dbd05-6649-4ef0-9fdf-b2abe4ee3193\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wlfw4" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.830767 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5c497e5f-f362-48a4-bf34-833bfdc6de1b-registry-tls\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.830792 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnt67\" (UniqueName: \"kubernetes.io/projected/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-kube-api-access-mnt67\") pod \"console-f9d7485db-7plbl\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") " pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.830814 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e22d80c8-5706-475b-a385-00c22ad2eaea-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-zpqmp\" (UID: \"e22d80c8-5706-475b-a385-00c22ad2eaea\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.830838 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pq84n\" (UniqueName: \"kubernetes.io/projected/bfcb44a8-0cab-4938-9f92-0fcc4227a662-kube-api-access-pq84n\") pod \"machine-config-operator-74547568cd-8jrs2\" (UID: \"bfcb44a8-0cab-4938-9f92-0fcc4227a662\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8jrs2" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.830865 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5c497e5f-f362-48a4-bf34-833bfdc6de1b-registry-certificates\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.830888 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-console-config\") pod \"console-f9d7485db-7plbl\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") " pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.830911 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0eafa688-6c78-44bc-93de-6e300a65a036-audit-dir\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.831219 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eba66557-699a-4be9-bc8e-fcedf6155f7e-config\") pod \"machine-api-operator-5694c8668f-pxdz7\" (UID: \"eba66557-699a-4be9-bc8e-fcedf6155f7e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pxdz7" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.832322 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-console-serving-cert\") pod \"console-f9d7485db-7plbl\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") " pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.832969 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e22d80c8-5706-475b-a385-00c22ad2eaea-client-ca\") pod \"controller-manager-879f6c89f-zpqmp\" (UID: \"e22d80c8-5706-475b-a385-00c22ad2eaea\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.833138 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-trusted-ca-bundle\") pod \"console-f9d7485db-7plbl\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") " pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.833250 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-oauth-serving-cert\") pod \"console-f9d7485db-7plbl\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") " pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:20:46 crc kubenswrapper[4857]: E1128 13:20:46.833327 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:47.333314089 +0000 UTC m=+139.360689256 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.833642 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5c497e5f-f362-48a4-bf34-833bfdc6de1b-trusted-ca\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.834334 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/eba66557-699a-4be9-bc8e-fcedf6155f7e-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-pxdz7\" (UID: \"eba66557-699a-4be9-bc8e-fcedf6155f7e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pxdz7" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.834407 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5c497e5f-f362-48a4-bf34-833bfdc6de1b-registry-certificates\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.835237 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e22d80c8-5706-475b-a385-00c22ad2eaea-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-zpqmp\" (UID: \"e22d80c8-5706-475b-a385-00c22ad2eaea\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.835846 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5c497e5f-f362-48a4-bf34-833bfdc6de1b-ca-trust-extracted\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.836433 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e22d80c8-5706-475b-a385-00c22ad2eaea-config\") pod \"controller-manager-879f6c89f-zpqmp\" (UID: \"e22d80c8-5706-475b-a385-00c22ad2eaea\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.837204 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-console-config\") pod \"console-f9d7485db-7plbl\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") " pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.837790 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e22d80c8-5706-475b-a385-00c22ad2eaea-serving-cert\") pod 
\"controller-manager-879f6c89f-zpqmp\" (UID: \"e22d80c8-5706-475b-a385-00c22ad2eaea\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.838493 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5c497e5f-f362-48a4-bf34-833bfdc6de1b-registry-tls\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.839108 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-console-oauth-config\") pod \"console-f9d7485db-7plbl\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") " pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.842401 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e846921b-6840-420c-a782-fba505744883-serving-cert\") pod \"openshift-config-operator-7777fb866f-fvj5x\" (UID: \"e846921b-6840-420c-a782-fba505744883\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fvj5x" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.842532 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5c497e5f-f362-48a4-bf34-833bfdc6de1b-installation-pull-secrets\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.864149 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wp69c\" (UniqueName: \"kubernetes.io/projected/e846921b-6840-420c-a782-fba505744883-kube-api-access-wp69c\") pod \"openshift-config-operator-7777fb866f-fvj5x\" (UID: \"e846921b-6840-420c-a782-fba505744883\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fvj5x" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.880525 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f64gl\" (UniqueName: \"kubernetes.io/projected/5c497e5f-f362-48a4-bf34-833bfdc6de1b-kube-api-access-f64gl\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.903621 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5c497e5f-f362-48a4-bf34-833bfdc6de1b-bound-sa-token\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.931555 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.931698 4857 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2010920b-32b8-4a16-9703-99191b3ac0ac-serving-cert\") pod \"service-ca-operator-777779d784-qr8zs\" (UID: \"2010920b-32b8-4a16-9703-99191b3ac0ac\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qr8zs" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.931720 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ca56a139-a9ad-46bf-a094-435eef021799-trusted-ca\") pod \"ingress-operator-5b745b69d9-rl4qj\" (UID: \"ca56a139-a9ad-46bf-a094-435eef021799\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rl4qj" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.931736 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/56f8ea27-0443-4e0a-9dd0-2755b61f49f9-srv-cert\") pod \"catalog-operator-68c6474976-rvh62\" (UID: \"56f8ea27-0443-4e0a-9dd0-2755b61f49f9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rvh62" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.931770 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/de0c7377-d49e-4651-b681-cddd455fd280-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-hvs7l\" (UID: \"de0c7377-d49e-4651-b681-cddd455fd280\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-hvs7l" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.931791 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/40c7002a-571a-4c01-bfb7-a6bbf316a615-audit\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.931810 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhzgn\" (UniqueName: \"kubernetes.io/projected/ca56a139-a9ad-46bf-a094-435eef021799-kube-api-access-lhzgn\") pod \"ingress-operator-5b745b69d9-rl4qj\" (UID: \"ca56a139-a9ad-46bf-a094-435eef021799\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rl4qj" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.931827 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/30934d71-ae7e-491a-933a-f1667b3608e4-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-5jcnx\" (UID: \"30934d71-ae7e-491a-933a-f1667b3608e4\") " pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.931845 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2zmdv\" (UniqueName: \"kubernetes.io/projected/0eafa688-6c78-44bc-93de-6e300a65a036-kube-api-access-2zmdv\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.931860 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cd7d528d-143a-4093-a83a-510b9767a355-kube-api-access\") pod 
\"openshift-kube-scheduler-operator-5fdd9b5758-244qv\" (UID: \"cd7d528d-143a-4093-a83a-510b9767a355\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-244qv" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.931874 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/40c7002a-571a-4c01-bfb7-a6bbf316a615-serving-cert\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.931901 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/40c7002a-571a-4c01-bfb7-a6bbf316a615-etcd-serving-ca\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.931914 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wx2p\" (UniqueName: \"kubernetes.io/projected/40c7002a-571a-4c01-bfb7-a6bbf316a615-kube-api-access-6wx2p\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.931933 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwsbb\" (UniqueName: \"kubernetes.io/projected/18a1ac68-146b-4c80-a763-df4b75e6698d-kube-api-access-mwsbb\") pod \"router-default-5444994796-zbwhx\" (UID: \"18a1ac68-146b-4c80-a763-df4b75e6698d\") " pod="openshift-ingress/router-default-5444994796-zbwhx" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.931954 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/03986f47-8037-41dd-a995-684a9296a676-socket-dir\") pod \"csi-hostpathplugin-9g9fd\" (UID: \"03986f47-8037-41dd-a995-684a9296a676\") " pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932002 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bfcb44a8-0cab-4938-9f92-0fcc4227a662-auth-proxy-config\") pod \"machine-config-operator-74547568cd-8jrs2\" (UID: \"bfcb44a8-0cab-4938-9f92-0fcc4227a662\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8jrs2" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932023 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ca56a139-a9ad-46bf-a094-435eef021799-bound-sa-token\") pod \"ingress-operator-5b745b69d9-rl4qj\" (UID: \"ca56a139-a9ad-46bf-a094-435eef021799\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rl4qj" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932086 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/40c7002a-571a-4c01-bfb7-a6bbf316a615-node-pullsecrets\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932104 4857 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0ba2c12-2e58-47ae-af8f-3a877929fee7-config\") pod \"kube-apiserver-operator-766d6c64bb-fqxp6\" (UID: \"b0ba2c12-2e58-47ae-af8f-3a877929fee7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fqxp6" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932119 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/219e3c3b-f767-461c-8573-8c2d8da52328-signing-cabundle\") pod \"service-ca-9c57cc56f-slgpm\" (UID: \"219e3c3b-f767-461c-8573-8c2d8da52328\") " pod="openshift-service-ca/service-ca-9c57cc56f-slgpm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932136 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b008b6be-e0fb-4486-9543-1343d458badd-metrics-tls\") pod \"dns-operator-744455d44c-gqtr2\" (UID: \"b008b6be-e0fb-4486-9543-1343d458badd\") " pod="openshift-dns-operator/dns-operator-744455d44c-gqtr2" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932152 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6f2s7\" (UniqueName: \"kubernetes.io/projected/6d834626-adda-4075-9196-bbf271b7b785-kube-api-access-6f2s7\") pod \"apiserver-7bbb656c7d-j8zwm\" (UID: \"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932168 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932185 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qd2h\" (UniqueName: \"kubernetes.io/projected/a0ce2b1a-313b-400a-b4e4-2b4b32b1785a-kube-api-access-2qd2h\") pod \"migrator-59844c95c7-w9chp\" (UID: \"a0ce2b1a-313b-400a-b4e4-2b4b32b1785a\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-w9chp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932202 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rswrc\" (UniqueName: \"kubernetes.io/projected/de0c7377-d49e-4651-b681-cddd455fd280-kube-api-access-rswrc\") pod \"multus-admission-controller-857f4d67dd-hvs7l\" (UID: \"de0c7377-d49e-4651-b681-cddd455fd280\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-hvs7l" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932218 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6d834626-adda-4075-9196-bbf271b7b785-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-j8zwm\" (UID: \"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932234 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6d834626-adda-4075-9196-bbf271b7b785-etcd-client\") pod \"apiserver-7bbb656c7d-j8zwm\" (UID: 
\"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932248 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/219e3c3b-f767-461c-8573-8c2d8da52328-signing-key\") pod \"service-ca-9c57cc56f-slgpm\" (UID: \"219e3c3b-f767-461c-8573-8c2d8da52328\") " pod="openshift-service-ca/service-ca-9c57cc56f-slgpm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932265 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmwcv\" (UniqueName: \"kubernetes.io/projected/f2860ffb-7c3e-488f-af31-bfb8609a67d4-kube-api-access-qmwcv\") pod \"collect-profiles-29405595-dbc4d\" (UID: \"f2860ffb-7c3e-488f-af31-bfb8609a67d4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405595-dbc4d" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932280 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fq5hl\" (UniqueName: \"kubernetes.io/projected/2010920b-32b8-4a16-9703-99191b3ac0ac-kube-api-access-fq5hl\") pod \"service-ca-operator-777779d784-qr8zs\" (UID: \"2010920b-32b8-4a16-9703-99191b3ac0ac\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qr8zs" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932296 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c493f67-7a6d-4685-afaf-be33fa220751-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-77ck4\" (UID: \"7c493f67-7a6d-4685-afaf-be33fa220751\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77ck4" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932314 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6d834626-adda-4075-9196-bbf271b7b785-audit-policies\") pod \"apiserver-7bbb656c7d-j8zwm\" (UID: \"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932329 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/ca56a139-a9ad-46bf-a094-435eef021799-metrics-tls\") pod \"ingress-operator-5b745b69d9-rl4qj\" (UID: \"ca56a139-a9ad-46bf-a094-435eef021799\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rl4qj" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932345 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jkv84\" (UniqueName: \"kubernetes.io/projected/98125640-3946-46e2-aa2b-c70d9a04a8a1-kube-api-access-jkv84\") pod \"kube-storage-version-migrator-operator-b67b599dd-j9ldq\" (UID: \"98125640-3946-46e2-aa2b-c70d9a04a8a1\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9ldq" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932361 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dph7p\" (UniqueName: \"kubernetes.io/projected/219e3c3b-f767-461c-8573-8c2d8da52328-kube-api-access-dph7p\") pod \"service-ca-9c57cc56f-slgpm\" (UID: \"219e3c3b-f767-461c-8573-8c2d8da52328\") " pod="openshift-service-ca/service-ca-9c57cc56f-slgpm" Nov 28 13:20:46 crc kubenswrapper[4857]: 
I1128 13:20:46.932377 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vw8wb\" (UniqueName: \"kubernetes.io/projected/b2810366-b7c9-44b1-8b46-afba9c91937f-kube-api-access-vw8wb\") pod \"ingress-canary-ddcsr\" (UID: \"b2810366-b7c9-44b1-8b46-afba9c91937f\") " pod="openshift-ingress-canary/ingress-canary-ddcsr" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932394 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932407 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/40c7002a-571a-4c01-bfb7-a6bbf316a615-encryption-config\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932421 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/bfcb44a8-0cab-4938-9f92-0fcc4227a662-images\") pod \"machine-config-operator-74547568cd-8jrs2\" (UID: \"bfcb44a8-0cab-4938-9f92-0fcc4227a662\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8jrs2" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932438 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932468 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cd7d528d-143a-4093-a83a-510b9767a355-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-244qv\" (UID: \"cd7d528d-143a-4093-a83a-510b9767a355\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-244qv" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932485 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6b4q\" (UniqueName: \"kubernetes.io/projected/fc3f1cee-b032-42c0-9996-8c1b815ad0f6-kube-api-access-f6b4q\") pod \"cluster-image-registry-operator-dc59b4c8b-n485x\" (UID: \"fc3f1cee-b032-42c0-9996-8c1b815ad0f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-n485x" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932500 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-966xg\" (UniqueName: \"kubernetes.io/projected/95c3adf6-7fac-411c-9f54-9eea69052b94-kube-api-access-966xg\") pod \"machine-config-server-vwm2l\" (UID: \"95c3adf6-7fac-411c-9f54-9eea69052b94\") " pod="openshift-machine-config-operator/machine-config-server-vwm2l" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932516 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-hn7rn\" (UniqueName: \"kubernetes.io/projected/56f8ea27-0443-4e0a-9dd0-2755b61f49f9-kube-api-access-hn7rn\") pod \"catalog-operator-68c6474976-rvh62\" (UID: \"56f8ea27-0443-4e0a-9dd0-2755b61f49f9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rvh62" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932537 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4bab5b07-9cba-4b9f-9c8d-ce540f57347f-config\") pod \"kube-controller-manager-operator-78b949d7b-46bkn\" (UID: \"4bab5b07-9cba-4b9f-9c8d-ce540f57347f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-46bkn" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932551 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/40c7002a-571a-4c01-bfb7-a6bbf316a615-trusted-ca-bundle\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932565 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f2860ffb-7c3e-488f-af31-bfb8609a67d4-config-volume\") pod \"collect-profiles-29405595-dbc4d\" (UID: \"f2860ffb-7c3e-488f-af31-bfb8609a67d4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405595-dbc4d" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932579 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/18a1ac68-146b-4c80-a763-df4b75e6698d-service-ca-bundle\") pod \"router-default-5444994796-zbwhx\" (UID: \"18a1ac68-146b-4c80-a763-df4b75e6698d\") " pod="openshift-ingress/router-default-5444994796-zbwhx" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932594 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40c7002a-571a-4c01-bfb7-a6bbf316a615-config\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932608 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/03986f47-8037-41dd-a995-684a9296a676-mountpoint-dir\") pod \"csi-hostpathplugin-9g9fd\" (UID: \"03986f47-8037-41dd-a995-684a9296a676\") " pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932689 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6d834626-adda-4075-9196-bbf271b7b785-audit-dir\") pod \"apiserver-7bbb656c7d-j8zwm\" (UID: \"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932708 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fc3f1cee-b032-42c0-9996-8c1b815ad0f6-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-n485x\" (UID: \"fc3f1cee-b032-42c0-9996-8c1b815ad0f6\") " 
pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-n485x" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.932723 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/bfcb44a8-0cab-4938-9f92-0fcc4227a662-proxy-tls\") pod \"machine-config-operator-74547568cd-8jrs2\" (UID: \"bfcb44a8-0cab-4938-9f92-0fcc4227a662\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8jrs2" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.933467 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.933485 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.933521 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.933565 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vfbdb\" (UniqueName: \"kubernetes.io/projected/7c493f67-7a6d-4685-afaf-be33fa220751-kube-api-access-vfbdb\") pod \"openshift-apiserver-operator-796bbdcf4f-77ck4\" (UID: \"7c493f67-7a6d-4685-afaf-be33fa220751\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77ck4" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.933583 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sdsd5\" (UniqueName: \"kubernetes.io/projected/a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee-kube-api-access-sdsd5\") pod \"packageserver-d55dfcdfc-fm4ct\" (UID: \"a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.933598 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2010920b-32b8-4a16-9703-99191b3ac0ac-config\") pod \"service-ca-operator-777779d784-qr8zs\" (UID: \"2010920b-32b8-4a16-9703-99191b3ac0ac\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qr8zs" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.933623 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/56f8ea27-0443-4e0a-9dd0-2755b61f49f9-profile-collector-cert\") pod \"catalog-operator-68c6474976-rvh62\" (UID: \"56f8ea27-0443-4e0a-9dd0-2755b61f49f9\") " 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rvh62" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.933678 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2240981f-6726-4fd9-b158-eae175371451-profile-collector-cert\") pod \"olm-operator-6b444d44fb-8tnz7\" (UID: \"2240981f-6726-4fd9-b158-eae175371451\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tnz7" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.933694 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/98125640-3946-46e2-aa2b-c70d9a04a8a1-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-j9ldq\" (UID: \"98125640-3946-46e2-aa2b-c70d9a04a8a1\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9ldq" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.933719 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/30934d71-ae7e-491a-933a-f1667b3608e4-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-5jcnx\" (UID: \"30934d71-ae7e-491a-933a-f1667b3608e4\") " pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.933894 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6d834626-adda-4075-9196-bbf271b7b785-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-j8zwm\" (UID: \"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934215 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/18a1ac68-146b-4c80-a763-df4b75e6698d-metrics-certs\") pod \"router-default-5444994796-zbwhx\" (UID: \"18a1ac68-146b-4c80-a763-df4b75e6698d\") " pod="openshift-ingress/router-default-5444994796-zbwhx" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934263 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4bab5b07-9cba-4b9f-9c8d-ce540f57347f-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-46bkn\" (UID: \"4bab5b07-9cba-4b9f-9c8d-ce540f57347f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-46bkn" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934278 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/95c3adf6-7fac-411c-9f54-9eea69052b94-certs\") pod \"machine-config-server-vwm2l\" (UID: \"95c3adf6-7fac-411c-9f54-9eea69052b94\") " pod="openshift-machine-config-operator/machine-config-server-vwm2l" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934296 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: 
I1128 13:20:46.934311 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/fc3f1cee-b032-42c0-9996-8c1b815ad0f6-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-n485x\" (UID: \"fc3f1cee-b032-42c0-9996-8c1b815ad0f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-n485x" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934329 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98125640-3946-46e2-aa2b-c70d9a04a8a1-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-j9ldq\" (UID: \"98125640-3946-46e2-aa2b-c70d9a04a8a1\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9ldq" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934348 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e4655590-7cc2-4489-8a43-d897b47bdd45-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-pkcmd\" (UID: \"e4655590-7cc2-4489-8a43-d897b47bdd45\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pkcmd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934365 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/03986f47-8037-41dd-a995-684a9296a676-plugins-dir\") pod \"csi-hostpathplugin-9g9fd\" (UID: \"03986f47-8037-41dd-a995-684a9296a676\") " pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934381 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbkjx\" (UniqueName: \"kubernetes.io/projected/faa6eb3a-4dd5-4e99-83da-fdd167db88e7-kube-api-access-qbkjx\") pod \"machine-config-controller-84d6567774-tz5lk\" (UID: \"faa6eb3a-4dd5-4e99-83da-fdd167db88e7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-tz5lk" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934399 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d834626-adda-4075-9196-bbf271b7b785-serving-cert\") pod \"apiserver-7bbb656c7d-j8zwm\" (UID: \"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934415 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b0ba2c12-2e58-47ae-af8f-3a877929fee7-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-fqxp6\" (UID: \"b0ba2c12-2e58-47ae-af8f-3a877929fee7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fqxp6" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934432 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/70c93db6-6e53-4870-92f3-e6335deb6936-config-volume\") pod \"dns-default-tcbhr\" (UID: \"70c93db6-6e53-4870-92f3-e6335deb6936\") " pod="openshift-dns/dns-default-tcbhr" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934450 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934466 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b2810366-b7c9-44b1-8b46-afba9c91937f-cert\") pod \"ingress-canary-ddcsr\" (UID: \"b2810366-b7c9-44b1-8b46-afba9c91937f\") " pod="openshift-ingress-canary/ingress-canary-ddcsr" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934482 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/75c61d4f-c7df-4f0f-b643-6bde1458075a-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-mfb8j\" (UID: \"75c61d4f-c7df-4f0f-b643-6bde1458075a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mfb8j" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934498 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/03986f47-8037-41dd-a995-684a9296a676-registration-dir\") pod \"csi-hostpathplugin-9g9fd\" (UID: \"03986f47-8037-41dd-a995-684a9296a676\") " pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934516 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4bab5b07-9cba-4b9f-9c8d-ce540f57347f-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-46bkn\" (UID: \"4bab5b07-9cba-4b9f-9c8d-ce540f57347f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-46bkn" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934532 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/40c7002a-571a-4c01-bfb7-a6bbf316a615-image-import-ca\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934548 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/916dbd05-6649-4ef0-9fdf-b2abe4ee3193-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-wlfw4\" (UID: \"916dbd05-6649-4ef0-9fdf-b2abe4ee3193\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wlfw4" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934566 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cd7d528d-143a-4093-a83a-510b9767a355-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-244qv\" (UID: \"cd7d528d-143a-4093-a83a-510b9767a355\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-244qv" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934583 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgrmg\" (UniqueName: 
\"kubernetes.io/projected/e4655590-7cc2-4489-8a43-d897b47bdd45-kube-api-access-cgrmg\") pod \"openshift-controller-manager-operator-756b6f6bc6-pkcmd\" (UID: \"e4655590-7cc2-4489-8a43-d897b47bdd45\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pkcmd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934600 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c493f67-7a6d-4685-afaf-be33fa220751-config\") pod \"openshift-apiserver-operator-796bbdcf4f-77ck4\" (UID: \"7c493f67-7a6d-4685-afaf-be33fa220751\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77ck4" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934615 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f2860ffb-7c3e-488f-af31-bfb8609a67d4-secret-volume\") pod \"collect-profiles-29405595-dbc4d\" (UID: \"f2860ffb-7c3e-488f-af31-bfb8609a67d4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405595-dbc4d" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934631 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rr9hh\" (UniqueName: \"kubernetes.io/projected/916dbd05-6649-4ef0-9fdf-b2abe4ee3193-kube-api-access-rr9hh\") pod \"package-server-manager-789f6589d5-wlfw4\" (UID: \"916dbd05-6649-4ef0-9fdf-b2abe4ee3193\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wlfw4" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934653 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0eafa688-6c78-44bc-93de-6e300a65a036-audit-dir\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934669 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pq84n\" (UniqueName: \"kubernetes.io/projected/bfcb44a8-0cab-4938-9f92-0fcc4227a662-kube-api-access-pq84n\") pod \"machine-config-operator-74547568cd-8jrs2\" (UID: \"bfcb44a8-0cab-4938-9f92-0fcc4227a662\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8jrs2" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934688 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/18a1ac68-146b-4c80-a763-df4b75e6698d-stats-auth\") pod \"router-default-5444994796-zbwhx\" (UID: \"18a1ac68-146b-4c80-a763-df4b75e6698d\") " pod="openshift-ingress/router-default-5444994796-zbwhx" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934705 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c889x\" (UniqueName: \"kubernetes.io/projected/b008b6be-e0fb-4486-9543-1343d458badd-kube-api-access-c889x\") pod \"dns-operator-744455d44c-gqtr2\" (UID: \"b008b6be-e0fb-4486-9543-1343d458badd\") " pod="openshift-dns-operator/dns-operator-744455d44c-gqtr2" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934719 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6d834626-adda-4075-9196-bbf271b7b785-encryption-config\") pod \"apiserver-7bbb656c7d-j8zwm\" 
(UID: \"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934736 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934778 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wslz\" (UniqueName: \"kubernetes.io/projected/03986f47-8037-41dd-a995-684a9296a676-kube-api-access-7wslz\") pod \"csi-hostpathplugin-9g9fd\" (UID: \"03986f47-8037-41dd-a995-684a9296a676\") " pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934800 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/18a1ac68-146b-4c80-a763-df4b75e6698d-default-certificate\") pod \"router-default-5444994796-zbwhx\" (UID: \"18a1ac68-146b-4c80-a763-df4b75e6698d\") " pod="openshift-ingress/router-default-5444994796-zbwhx" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934817 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8f67g\" (UniqueName: \"kubernetes.io/projected/70c93db6-6e53-4870-92f3-e6335deb6936-kube-api-access-8f67g\") pod \"dns-default-tcbhr\" (UID: \"70c93db6-6e53-4870-92f3-e6335deb6936\") " pod="openshift-dns/dns-default-tcbhr" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934831 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2240981f-6726-4fd9-b158-eae175371451-srv-cert\") pod \"olm-operator-6b444d44fb-8tnz7\" (UID: \"2240981f-6726-4fd9-b158-eae175371451\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tnz7" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934847 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934862 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/40c7002a-571a-4c01-bfb7-a6bbf316a615-etcd-client\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934876 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee-webhook-cert\") pod \"packageserver-d55dfcdfc-fm4ct\" (UID: \"a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934891 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" 
(UniqueName: \"kubernetes.io/host-path/40c7002a-571a-4c01-bfb7-a6bbf316a615-audit-dir\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934905 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee-apiservice-cert\") pod \"packageserver-d55dfcdfc-fm4ct\" (UID: \"a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934923 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/faa6eb3a-4dd5-4e99-83da-fdd167db88e7-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-tz5lk\" (UID: \"faa6eb3a-4dd5-4e99-83da-fdd167db88e7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-tz5lk" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934939 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/03986f47-8037-41dd-a995-684a9296a676-csi-data-dir\") pod \"csi-hostpathplugin-9g9fd\" (UID: \"03986f47-8037-41dd-a995-684a9296a676\") " pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934954 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/95c3adf6-7fac-411c-9f54-9eea69052b94-node-bootstrap-token\") pod \"machine-config-server-vwm2l\" (UID: \"95c3adf6-7fac-411c-9f54-9eea69052b94\") " pod="openshift-machine-config-operator/machine-config-server-vwm2l" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934969 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wcsn\" (UniqueName: \"kubernetes.io/projected/75c61d4f-c7df-4f0f-b643-6bde1458075a-kube-api-access-6wcsn\") pod \"control-plane-machine-set-operator-78cbb6b69f-mfb8j\" (UID: \"75c61d4f-c7df-4f0f-b643-6bde1458075a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mfb8j" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.934985 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8cz4z\" (UniqueName: \"kubernetes.io/projected/30934d71-ae7e-491a-933a-f1667b3608e4-kube-api-access-8cz4z\") pod \"marketplace-operator-79b997595-5jcnx\" (UID: \"30934d71-ae7e-491a-933a-f1667b3608e4\") " pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.935040 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/faa6eb3a-4dd5-4e99-83da-fdd167db88e7-proxy-tls\") pod \"machine-config-controller-84d6567774-tz5lk\" (UID: \"faa6eb3a-4dd5-4e99-83da-fdd167db88e7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-tz5lk" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.935060 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hknj5\" (UniqueName: \"kubernetes.io/projected/2240981f-6726-4fd9-b158-eae175371451-kube-api-access-hknj5\") pod \"olm-operator-6b444d44fb-8tnz7\" (UID: 
\"2240981f-6726-4fd9-b158-eae175371451\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tnz7" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.935086 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4655590-7cc2-4489-8a43-d897b47bdd45-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-pkcmd\" (UID: \"e4655590-7cc2-4489-8a43-d897b47bdd45\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pkcmd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.935113 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fc3f1cee-b032-42c0-9996-8c1b815ad0f6-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-n485x\" (UID: \"fc3f1cee-b032-42c0-9996-8c1b815ad0f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-n485x" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.935131 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b0ba2c12-2e58-47ae-af8f-3a877929fee7-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-fqxp6\" (UID: \"b0ba2c12-2e58-47ae-af8f-3a877929fee7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fqxp6" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.935157 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.935176 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-audit-policies\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.935192 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee-tmpfs\") pod \"packageserver-d55dfcdfc-fm4ct\" (UID: \"a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.935207 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/70c93db6-6e53-4870-92f3-e6335deb6936-metrics-tls\") pod \"dns-default-tcbhr\" (UID: \"70c93db6-6e53-4870-92f3-e6335deb6936\") " pod="openshift-dns/dns-default-tcbhr" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.936221 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-audit-policies\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.936619 4857 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee-tmpfs\") pod \"packageserver-d55dfcdfc-fm4ct\" (UID: \"a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.936831 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fc3f1cee-b032-42c0-9996-8c1b815ad0f6-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-n485x\" (UID: \"fc3f1cee-b032-42c0-9996-8c1b815ad0f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-n485x" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.937599 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4655590-7cc2-4489-8a43-d897b47bdd45-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-pkcmd\" (UID: \"e4655590-7cc2-4489-8a43-d897b47bdd45\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pkcmd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.938006 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: E1128 13:20:46.938097 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:47.438070177 +0000 UTC m=+139.465445364 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.938140 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0eafa688-6c78-44bc-93de-6e300a65a036-audit-dir\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.938475 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/40c7002a-571a-4c01-bfb7-a6bbf316a615-audit-dir\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.938696 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cd7d528d-143a-4093-a83a-510b9767a355-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-244qv\" (UID: \"cd7d528d-143a-4093-a83a-510b9767a355\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-244qv" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.939436 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.939517 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40c7002a-571a-4c01-bfb7-a6bbf316a615-config\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.939593 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/03986f47-8037-41dd-a995-684a9296a676-mountpoint-dir\") pod \"csi-hostpathplugin-9g9fd\" (UID: \"03986f47-8037-41dd-a995-684a9296a676\") " pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.939638 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6d834626-adda-4075-9196-bbf271b7b785-audit-dir\") pod \"apiserver-7bbb656c7d-j8zwm\" (UID: \"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.939698 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-hzw48\" 
(UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.939966 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/18a1ac68-146b-4c80-a763-df4b75e6698d-service-ca-bundle\") pod \"router-default-5444994796-zbwhx\" (UID: \"18a1ac68-146b-4c80-a763-df4b75e6698d\") " pod="openshift-ingress/router-default-5444994796-zbwhx" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.940124 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/03986f47-8037-41dd-a995-684a9296a676-socket-dir\") pod \"csi-hostpathplugin-9g9fd\" (UID: \"03986f47-8037-41dd-a995-684a9296a676\") " pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.940512 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ca56a139-a9ad-46bf-a094-435eef021799-trusted-ca\") pod \"ingress-operator-5b745b69d9-rl4qj\" (UID: \"ca56a139-a9ad-46bf-a094-435eef021799\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rl4qj" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.941254 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.941280 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f2860ffb-7c3e-488f-af31-bfb8609a67d4-secret-volume\") pod \"collect-profiles-29405595-dbc4d\" (UID: \"f2860ffb-7c3e-488f-af31-bfb8609a67d4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405595-dbc4d" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.941301 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/40c7002a-571a-4c01-bfb7-a6bbf316a615-image-import-ca\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.941393 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/faa6eb3a-4dd5-4e99-83da-fdd167db88e7-proxy-tls\") pod \"machine-config-controller-84d6567774-tz5lk\" (UID: \"faa6eb3a-4dd5-4e99-83da-fdd167db88e7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-tz5lk" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.941982 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bfcb44a8-0cab-4938-9f92-0fcc4227a662-auth-proxy-config\") pod \"machine-config-operator-74547568cd-8jrs2\" (UID: \"bfcb44a8-0cab-4938-9f92-0fcc4227a662\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8jrs2" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.942257 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: 
\"kubernetes.io/host-path/40c7002a-571a-4c01-bfb7-a6bbf316a615-node-pullsecrets\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.943074 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0ba2c12-2e58-47ae-af8f-3a877929fee7-config\") pod \"kube-apiserver-operator-766d6c64bb-fqxp6\" (UID: \"b0ba2c12-2e58-47ae-af8f-3a877929fee7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fqxp6" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.943263 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/40c7002a-571a-4c01-bfb7-a6bbf316a615-trusted-ca-bundle\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.943307 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/faa6eb3a-4dd5-4e99-83da-fdd167db88e7-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-tz5lk\" (UID: \"faa6eb3a-4dd5-4e99-83da-fdd167db88e7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-tz5lk" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.943962 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/40c7002a-571a-4c01-bfb7-a6bbf316a615-audit\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.944134 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/219e3c3b-f767-461c-8573-8c2d8da52328-signing-cabundle\") pod \"service-ca-9c57cc56f-slgpm\" (UID: \"219e3c3b-f767-461c-8573-8c2d8da52328\") " pod="openshift-service-ca/service-ca-9c57cc56f-slgpm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.945484 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/03986f47-8037-41dd-a995-684a9296a676-csi-data-dir\") pod \"csi-hostpathplugin-9g9fd\" (UID: \"03986f47-8037-41dd-a995-684a9296a676\") " pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.945798 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f2860ffb-7c3e-488f-af31-bfb8609a67d4-config-volume\") pod \"collect-profiles-29405595-dbc4d\" (UID: \"f2860ffb-7c3e-488f-af31-bfb8609a67d4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405595-dbc4d" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.946118 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.946259 4857 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/916dbd05-6649-4ef0-9fdf-b2abe4ee3193-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-wlfw4\" (UID: \"916dbd05-6649-4ef0-9fdf-b2abe4ee3193\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wlfw4" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.946446 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/40c7002a-571a-4c01-bfb7-a6bbf316a615-etcd-serving-ca\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.946625 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/de0c7377-d49e-4651-b681-cddd455fd280-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-hvs7l\" (UID: \"de0c7377-d49e-4651-b681-cddd455fd280\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-hvs7l" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.946946 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/bfcb44a8-0cab-4938-9f92-0fcc4227a662-proxy-tls\") pod \"machine-config-operator-74547568cd-8jrs2\" (UID: \"bfcb44a8-0cab-4938-9f92-0fcc4227a662\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8jrs2" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.947007 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6d834626-adda-4075-9196-bbf271b7b785-audit-policies\") pod \"apiserver-7bbb656c7d-j8zwm\" (UID: \"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.947146 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/03986f47-8037-41dd-a995-684a9296a676-plugins-dir\") pod \"csi-hostpathplugin-9g9fd\" (UID: \"03986f47-8037-41dd-a995-684a9296a676\") " pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.947224 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6d834626-adda-4075-9196-bbf271b7b785-etcd-client\") pod \"apiserver-7bbb656c7d-j8zwm\" (UID: \"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.947401 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6d834626-adda-4075-9196-bbf271b7b785-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-j8zwm\" (UID: \"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.947521 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98125640-3946-46e2-aa2b-c70d9a04a8a1-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-j9ldq\" (UID: \"98125640-3946-46e2-aa2b-c70d9a04a8a1\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9ldq" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.947783 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/70c93db6-6e53-4870-92f3-e6335deb6936-metrics-tls\") pod \"dns-default-tcbhr\" (UID: \"70c93db6-6e53-4870-92f3-e6335deb6936\") " pod="openshift-dns/dns-default-tcbhr" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.948203 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c493f67-7a6d-4685-afaf-be33fa220751-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-77ck4\" (UID: \"7c493f67-7a6d-4685-afaf-be33fa220751\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77ck4" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.948225 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2010920b-32b8-4a16-9703-99191b3ac0ac-config\") pod \"service-ca-operator-777779d784-qr8zs\" (UID: \"2010920b-32b8-4a16-9703-99191b3ac0ac\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qr8zs" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.948405 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/40c7002a-571a-4c01-bfb7-a6bbf316a615-etcd-client\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.948461 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4bab5b07-9cba-4b9f-9c8d-ce540f57347f-config\") pod \"kube-controller-manager-operator-78b949d7b-46bkn\" (UID: \"4bab5b07-9cba-4b9f-9c8d-ce540f57347f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-46bkn" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.948728 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/95c3adf6-7fac-411c-9f54-9eea69052b94-node-bootstrap-token\") pod \"machine-config-server-vwm2l\" (UID: \"95c3adf6-7fac-411c-9f54-9eea69052b94\") " pod="openshift-machine-config-operator/machine-config-server-vwm2l" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.948848 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/56f8ea27-0443-4e0a-9dd0-2755b61f49f9-srv-cert\") pod \"catalog-operator-68c6474976-rvh62\" (UID: \"56f8ea27-0443-4e0a-9dd0-2755b61f49f9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rvh62" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.952683 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/40c7002a-571a-4c01-bfb7-a6bbf316a615-serving-cert\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.952888 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2240981f-6726-4fd9-b158-eae175371451-srv-cert\") pod 
\"olm-operator-6b444d44fb-8tnz7\" (UID: \"2240981f-6726-4fd9-b158-eae175371451\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tnz7" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.953306 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4bab5b07-9cba-4b9f-9c8d-ce540f57347f-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-46bkn\" (UID: \"4bab5b07-9cba-4b9f-9c8d-ce540f57347f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-46bkn" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.953377 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.953415 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee-webhook-cert\") pod \"packageserver-d55dfcdfc-fm4ct\" (UID: \"a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.953456 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cd7d528d-143a-4093-a83a-510b9767a355-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-244qv\" (UID: \"cd7d528d-143a-4093-a83a-510b9767a355\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-244qv" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.954024 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e4655590-7cc2-4489-8a43-d897b47bdd45-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-pkcmd\" (UID: \"e4655590-7cc2-4489-8a43-d897b47bdd45\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pkcmd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.954815 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/fc3f1cee-b032-42c0-9996-8c1b815ad0f6-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-n485x\" (UID: \"fc3f1cee-b032-42c0-9996-8c1b815ad0f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-n485x" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.956372 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/18a1ac68-146b-4c80-a763-df4b75e6698d-default-certificate\") pod \"router-default-5444994796-zbwhx\" (UID: \"18a1ac68-146b-4c80-a763-df4b75e6698d\") " pod="openshift-ingress/router-default-5444994796-zbwhx" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.956675 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b008b6be-e0fb-4486-9543-1343d458badd-metrics-tls\") pod \"dns-operator-744455d44c-gqtr2\" (UID: \"b008b6be-e0fb-4486-9543-1343d458badd\") " 
pod="openshift-dns-operator/dns-operator-744455d44c-gqtr2" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.957281 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d834626-adda-4075-9196-bbf271b7b785-serving-cert\") pod \"apiserver-7bbb656c7d-j8zwm\" (UID: \"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.957506 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/ca56a139-a9ad-46bf-a094-435eef021799-metrics-tls\") pod \"ingress-operator-5b745b69d9-rl4qj\" (UID: \"ca56a139-a9ad-46bf-a094-435eef021799\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rl4qj" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.972590 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6d834626-adda-4075-9196-bbf271b7b785-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-j8zwm\" (UID: \"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.972964 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6d834626-adda-4075-9196-bbf271b7b785-encryption-config\") pod \"apiserver-7bbb656c7d-j8zwm\" (UID: \"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.973011 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.973564 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/18a1ac68-146b-4c80-a763-df4b75e6698d-stats-auth\") pod \"router-default-5444994796-zbwhx\" (UID: \"18a1ac68-146b-4c80-a763-df4b75e6698d\") " pod="openshift-ingress/router-default-5444994796-zbwhx" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.973716 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2010920b-32b8-4a16-9703-99191b3ac0ac-serving-cert\") pod \"service-ca-operator-777779d784-qr8zs\" (UID: \"2010920b-32b8-4a16-9703-99191b3ac0ac\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qr8zs" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.974116 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee-apiservice-cert\") pod \"packageserver-d55dfcdfc-fm4ct\" (UID: \"a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.974871 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnt67\" (UniqueName: \"kubernetes.io/projected/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-kube-api-access-mnt67\") pod 
\"console-f9d7485db-7plbl\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") " pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.975407 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2240981f-6726-4fd9-b158-eae175371451-profile-collector-cert\") pod \"olm-operator-6b444d44fb-8tnz7\" (UID: \"2240981f-6726-4fd9-b158-eae175371451\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tnz7" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.975730 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.975847 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.976242 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.976414 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/30934d71-ae7e-491a-933a-f1667b3608e4-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-5jcnx\" (UID: \"30934d71-ae7e-491a-933a-f1667b3608e4\") " pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.976529 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/219e3c3b-f767-461c-8573-8c2d8da52328-signing-key\") pod \"service-ca-9c57cc56f-slgpm\" (UID: \"219e3c3b-f767-461c-8573-8c2d8da52328\") " pod="openshift-service-ca/service-ca-9c57cc56f-slgpm" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.977503 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/bfcb44a8-0cab-4938-9f92-0fcc4227a662-images\") pod \"machine-config-operator-74547568cd-8jrs2\" (UID: \"bfcb44a8-0cab-4938-9f92-0fcc4227a662\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8jrs2" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.977530 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/56f8ea27-0443-4e0a-9dd0-2755b61f49f9-profile-collector-cert\") pod \"catalog-operator-68c6474976-rvh62\" (UID: \"56f8ea27-0443-4e0a-9dd0-2755b61f49f9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rvh62" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 
13:20:46.980810 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.980967 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/03986f47-8037-41dd-a995-684a9296a676-registration-dir\") pod \"csi-hostpathplugin-9g9fd\" (UID: \"03986f47-8037-41dd-a995-684a9296a676\") " pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.982206 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b0ba2c12-2e58-47ae-af8f-3a877929fee7-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-fqxp6\" (UID: \"b0ba2c12-2e58-47ae-af8f-3a877929fee7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fqxp6" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.982202 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/70c93db6-6e53-4870-92f3-e6335deb6936-config-volume\") pod \"dns-default-tcbhr\" (UID: \"70c93db6-6e53-4870-92f3-e6335deb6936\") " pod="openshift-dns/dns-default-tcbhr" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.982644 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/40c7002a-571a-4c01-bfb7-a6bbf316a615-encryption-config\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.983302 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b485r\" (UniqueName: \"kubernetes.io/projected/e22d80c8-5706-475b-a385-00c22ad2eaea-kube-api-access-b485r\") pod \"controller-manager-879f6c89f-zpqmp\" (UID: \"e22d80c8-5706-475b-a385-00c22ad2eaea\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.983492 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/95c3adf6-7fac-411c-9f54-9eea69052b94-certs\") pod \"machine-config-server-vwm2l\" (UID: \"95c3adf6-7fac-411c-9f54-9eea69052b94\") " pod="openshift-machine-config-operator/machine-config-server-vwm2l" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.983541 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/30934d71-ae7e-491a-933a-f1667b3608e4-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-5jcnx\" (UID: \"30934d71-ae7e-491a-933a-f1667b3608e4\") " pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.985819 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/75c61d4f-c7df-4f0f-b643-6bde1458075a-control-plane-machine-set-operator-tls\") pod 
\"control-plane-machine-set-operator-78cbb6b69f-mfb8j\" (UID: \"75c61d4f-c7df-4f0f-b643-6bde1458075a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mfb8j" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.985875 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c493f67-7a6d-4685-afaf-be33fa220751-config\") pod \"openshift-apiserver-operator-796bbdcf4f-77ck4\" (UID: \"7c493f67-7a6d-4685-afaf-be33fa220751\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77ck4" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.987043 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/18a1ac68-146b-4c80-a763-df4b75e6698d-metrics-certs\") pod \"router-default-5444994796-zbwhx\" (UID: \"18a1ac68-146b-4c80-a763-df4b75e6698d\") " pod="openshift-ingress/router-default-5444994796-zbwhx" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.988240 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b2810366-b7c9-44b1-8b46-afba9c91937f-cert\") pod \"ingress-canary-ddcsr\" (UID: \"b2810366-b7c9-44b1-8b46-afba9c91937f\") " pod="openshift-ingress-canary/ingress-canary-ddcsr" Nov 28 13:20:46 crc kubenswrapper[4857]: I1128 13:20:46.991188 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/98125640-3946-46e2-aa2b-c70d9a04a8a1-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-j9ldq\" (UID: \"98125640-3946-46e2-aa2b-c70d9a04a8a1\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9ldq" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.001642 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c544t"] Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.003658 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hr869\" (UniqueName: \"kubernetes.io/projected/eba66557-699a-4be9-bc8e-fcedf6155f7e-kube-api-access-hr869\") pod \"machine-api-operator-5694c8668f-pxdz7\" (UID: \"eba66557-699a-4be9-bc8e-fcedf6155f7e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pxdz7" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.020578 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8cz4z\" (UniqueName: \"kubernetes.io/projected/30934d71-ae7e-491a-933a-f1667b3608e4-kube-api-access-8cz4z\") pod \"marketplace-operator-79b997595-5jcnx\" (UID: \"30934d71-ae7e-491a-933a-f1667b3608e4\") " pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.024898 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-t9k6q" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.038445 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:47 crc kubenswrapper[4857]: E1128 13:20:47.038925 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:47.538902334 +0000 UTC m=+139.566277581 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.042844 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b0ba2c12-2e58-47ae-af8f-3a877929fee7-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-fqxp6\" (UID: \"b0ba2c12-2e58-47ae-af8f-3a877929fee7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fqxp6" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.052422 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-pxdz7" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.055508 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" event={"ID":"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c","Type":"ContainerStarted","Data":"9361d4754365efa1937ddbc165fa9817dc5ba730d043bdb882ffa5e0527cb38d"} Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.064213 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hknj5\" (UniqueName: \"kubernetes.io/projected/2240981f-6726-4fd9-b158-eae175371451-kube-api-access-hknj5\") pod \"olm-operator-6b444d44fb-8tnz7\" (UID: \"2240981f-6726-4fd9-b158-eae175371451\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tnz7" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.070793 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.081858 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8f67g\" (UniqueName: \"kubernetes.io/projected/70c93db6-6e53-4870-92f3-e6335deb6936-kube-api-access-8f67g\") pod \"dns-default-tcbhr\" (UID: \"70c93db6-6e53-4870-92f3-e6335deb6936\") " pod="openshift-dns/dns-default-tcbhr" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.109282 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7wslz\" (UniqueName: \"kubernetes.io/projected/03986f47-8037-41dd-a995-684a9296a676-kube-api-access-7wslz\") pod \"csi-hostpathplugin-9g9fd\" (UID: \"03986f47-8037-41dd-a995-684a9296a676\") " pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.114962 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tnz7" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.123495 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rr9hh\" (UniqueName: \"kubernetes.io/projected/916dbd05-6649-4ef0-9fdf-b2abe4ee3193-kube-api-access-rr9hh\") pod \"package-server-manager-789f6589d5-wlfw4\" (UID: \"916dbd05-6649-4ef0-9fdf-b2abe4ee3193\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wlfw4" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.132180 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fvj5x" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.143591 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:47 crc kubenswrapper[4857]: E1128 13:20:47.143785 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:47.643739226 +0000 UTC m=+139.671114383 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.144195 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:47 crc kubenswrapper[4857]: E1128 13:20:47.144545 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:47.644527569 +0000 UTC m=+139.671902736 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.145216 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fqxp6" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.147712 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pq84n\" (UniqueName: \"kubernetes.io/projected/bfcb44a8-0cab-4938-9f92-0fcc4227a662-kube-api-access-pq84n\") pod \"machine-config-operator-74547568cd-8jrs2\" (UID: \"bfcb44a8-0cab-4938-9f92-0fcc4227a662\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8jrs2" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.161699 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6b4q\" (UniqueName: \"kubernetes.io/projected/fc3f1cee-b032-42c0-9996-8c1b815ad0f6-kube-api-access-f6b4q\") pod \"cluster-image-registry-operator-dc59b4c8b-n485x\" (UID: \"fc3f1cee-b032-42c0-9996-8c1b815ad0f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-n485x" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.182113 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fc3f1cee-b032-42c0-9996-8c1b815ad0f6-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-n485x\" (UID: \"fc3f1cee-b032-42c0-9996-8c1b815ad0f6\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-n485x" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.189467 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wlfw4" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.196014 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.207262 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-966xg\" (UniqueName: \"kubernetes.io/projected/95c3adf6-7fac-411c-9f54-9eea69052b94-kube-api-access-966xg\") pod \"machine-config-server-vwm2l\" (UID: \"95c3adf6-7fac-411c-9f54-9eea69052b94\") " pod="openshift-machine-config-operator/machine-config-server-vwm2l" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.225709 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-t9k6q"] Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.227321 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.235103 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-vwm2l" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.235483 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hn7rn\" (UniqueName: \"kubernetes.io/projected/56f8ea27-0443-4e0a-9dd0-2755b61f49f9-kube-api-access-hn7rn\") pod \"catalog-operator-68c6474976-rvh62\" (UID: \"56f8ea27-0443-4e0a-9dd0-2755b61f49f9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rvh62" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.236654 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-tcbhr" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.242265 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c889x\" (UniqueName: \"kubernetes.io/projected/b008b6be-e0fb-4486-9543-1343d458badd-kube-api-access-c889x\") pod \"dns-operator-744455d44c-gqtr2\" (UID: \"b008b6be-e0fb-4486-9543-1343d458badd\") " pod="openshift-dns-operator/dns-operator-744455d44c-gqtr2" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.247154 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:47 crc kubenswrapper[4857]: E1128 13:20:47.247340 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:47.747313362 +0000 UTC m=+139.774688529 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.247515 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:47 crc kubenswrapper[4857]: E1128 13:20:47.247915 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:47.747901659 +0000 UTC m=+139.775276826 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.252018 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" Nov 28 13:20:47 crc kubenswrapper[4857]: W1128 13:20:47.264328 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod95c3adf6_7fac_411c_9f54_9eea69052b94.slice/crio-0626f2abbab3edcba1ea5c53be3bf8931971b7f4ec65b0271b876ea5f1dcb9af WatchSource:0}: Error finding container 0626f2abbab3edcba1ea5c53be3bf8931971b7f4ec65b0271b876ea5f1dcb9af: Status 404 returned error can't find the container with id 0626f2abbab3edcba1ea5c53be3bf8931971b7f4ec65b0271b876ea5f1dcb9af Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.297544 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-pxdz7"] Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.298368 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wcsn\" (UniqueName: \"kubernetes.io/projected/75c61d4f-c7df-4f0f-b643-6bde1458075a-kube-api-access-6wcsn\") pod \"control-plane-machine-set-operator-78cbb6b69f-mfb8j\" (UID: \"75c61d4f-c7df-4f0f-b643-6bde1458075a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mfb8j" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.301235 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jkv84\" (UniqueName: \"kubernetes.io/projected/98125640-3946-46e2-aa2b-c70d9a04a8a1-kube-api-access-jkv84\") pod \"kube-storage-version-migrator-operator-b67b599dd-j9ldq\" (UID: \"98125640-3946-46e2-aa2b-c70d9a04a8a1\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9ldq" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.301363 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ca56a139-a9ad-46bf-a094-435eef021799-bound-sa-token\") pod \"ingress-operator-5b745b69d9-rl4qj\" (UID: \"ca56a139-a9ad-46bf-a094-435eef021799\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rl4qj" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.324564 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cd7d528d-143a-4093-a83a-510b9767a355-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-244qv\" (UID: \"cd7d528d-143a-4093-a83a-510b9767a355\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-244qv" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.340704 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-n485x" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.342590 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmwcv\" (UniqueName: \"kubernetes.io/projected/f2860ffb-7c3e-488f-af31-bfb8609a67d4-kube-api-access-qmwcv\") pod \"collect-profiles-29405595-dbc4d\" (UID: \"f2860ffb-7c3e-488f-af31-bfb8609a67d4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405595-dbc4d" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.349235 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:47 crc kubenswrapper[4857]: E1128 13:20:47.349675 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:47.849657762 +0000 UTC m=+139.877032929 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.349741 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:47 crc kubenswrapper[4857]: E1128 13:20:47.350131 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:47.850120616 +0000 UTC m=+139.877495783 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.362319 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fq5hl\" (UniqueName: \"kubernetes.io/projected/2010920b-32b8-4a16-9703-99191b3ac0ac-kube-api-access-fq5hl\") pod \"service-ca-operator-777779d784-qr8zs\" (UID: \"2010920b-32b8-4a16-9703-99191b3ac0ac\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qr8zs" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.362935 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-7plbl"] Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.364731 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-gqtr2" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.379007 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhzgn\" (UniqueName: \"kubernetes.io/projected/ca56a139-a9ad-46bf-a094-435eef021799-kube-api-access-lhzgn\") pod \"ingress-operator-5b745b69d9-rl4qj\" (UID: \"ca56a139-a9ad-46bf-a094-435eef021799\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rl4qj" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.393306 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mfb8j" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.404347 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9ldq" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.421604 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vfbdb\" (UniqueName: \"kubernetes.io/projected/7c493f67-7a6d-4685-afaf-be33fa220751-kube-api-access-vfbdb\") pod \"openshift-apiserver-operator-796bbdcf4f-77ck4\" (UID: \"7c493f67-7a6d-4685-afaf-be33fa220751\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77ck4" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.426232 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rl4qj" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.440411 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8jrs2" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.444049 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sdsd5\" (UniqueName: \"kubernetes.io/projected/a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee-kube-api-access-sdsd5\") pod \"packageserver-d55dfcdfc-fm4ct\" (UID: \"a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.451895 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:47 crc kubenswrapper[4857]: E1128 13:20:47.452550 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:47.952529518 +0000 UTC m=+139.979904685 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.460763 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rvh62" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.461084 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6f2s7\" (UniqueName: \"kubernetes.io/projected/6d834626-adda-4075-9196-bbf271b7b785-kube-api-access-6f2s7\") pod \"apiserver-7bbb656c7d-j8zwm\" (UID: \"6d834626-adda-4075-9196-bbf271b7b785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.476777 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405595-dbc4d" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.479993 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wx2p\" (UniqueName: \"kubernetes.io/projected/40c7002a-571a-4c01-bfb7-a6bbf316a615-kube-api-access-6wx2p\") pod \"apiserver-76f77b778f-2xzcp\" (UID: \"40c7002a-571a-4c01-bfb7-a6bbf316a615\") " pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.481778 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.499445 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwsbb\" (UniqueName: \"kubernetes.io/projected/18a1ac68-146b-4c80-a763-df4b75e6698d-kube-api-access-mwsbb\") pod \"router-default-5444994796-zbwhx\" (UID: \"18a1ac68-146b-4c80-a763-df4b75e6698d\") " pod="openshift-ingress/router-default-5444994796-zbwhx" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.501115 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qr8zs" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.518535 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qd2h\" (UniqueName: \"kubernetes.io/projected/a0ce2b1a-313b-400a-b4e4-2b4b32b1785a-kube-api-access-2qd2h\") pod \"migrator-59844c95c7-w9chp\" (UID: \"a0ce2b1a-313b-400a-b4e4-2b4b32b1785a\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-w9chp" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.541437 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rswrc\" (UniqueName: \"kubernetes.io/projected/de0c7377-d49e-4651-b681-cddd455fd280-kube-api-access-rswrc\") pod \"multus-admission-controller-857f4d67dd-hvs7l\" (UID: \"de0c7377-d49e-4651-b681-cddd455fd280\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-hvs7l" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.554112 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:47 crc kubenswrapper[4857]: E1128 13:20:47.554446 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:48.054433036 +0000 UTC m=+140.081808193 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.566088 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbkjx\" (UniqueName: \"kubernetes.io/projected/faa6eb3a-4dd5-4e99-83da-fdd167db88e7-kube-api-access-qbkjx\") pod \"machine-config-controller-84d6567774-tz5lk\" (UID: \"faa6eb3a-4dd5-4e99-83da-fdd167db88e7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-tz5lk" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.582625 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zmdv\" (UniqueName: \"kubernetes.io/projected/0eafa688-6c78-44bc-93de-6e300a65a036-kube-api-access-2zmdv\") pod \"oauth-openshift-558db77b4-hzw48\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.586741 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.598873 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dph7p\" (UniqueName: \"kubernetes.io/projected/219e3c3b-f767-461c-8573-8c2d8da52328-kube-api-access-dph7p\") pod \"service-ca-9c57cc56f-slgpm\" (UID: \"219e3c3b-f767-461c-8573-8c2d8da52328\") " pod="openshift-service-ca/service-ca-9c57cc56f-slgpm" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.616282 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-244qv" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.620381 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vw8wb\" (UniqueName: \"kubernetes.io/projected/b2810366-b7c9-44b1-8b46-afba9c91937f-kube-api-access-vw8wb\") pod \"ingress-canary-ddcsr\" (UID: \"b2810366-b7c9-44b1-8b46-afba9c91937f\") " pod="openshift-ingress-canary/ingress-canary-ddcsr" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.640860 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4bab5b07-9cba-4b9f-9c8d-ce540f57347f-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-46bkn\" (UID: \"4bab5b07-9cba-4b9f-9c8d-ce540f57347f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-46bkn" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.652062 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.655351 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:47 crc kubenswrapper[4857]: E1128 13:20:47.656220 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:48.156191809 +0000 UTC m=+140.183566976 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.658345 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77ck4" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.659000 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fqxp6"] Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.660588 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tnz7"] Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.675561 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-fvj5x"] Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.678934 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.683395 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-tz5lk" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.697577 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-zbwhx" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.710083 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-46bkn" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.732641 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-w9chp" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.752418 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-hvs7l" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.758476 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:47 crc kubenswrapper[4857]: E1128 13:20:47.758861 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:48.258846188 +0000 UTC m=+140.286221375 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.768671 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-5jcnx"] Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.768952 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-slgpm" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.771591 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-9g9fd"] Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.773659 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wlfw4"] Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.843210 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-ddcsr" Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.859819 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:47 crc kubenswrapper[4857]: E1128 13:20:47.861155 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:48.361119787 +0000 UTC m=+140.388494994 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:47 crc kubenswrapper[4857]: I1128 13:20:47.961927 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:47 crc kubenswrapper[4857]: E1128 13:20:47.962596 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:48.462578842 +0000 UTC m=+140.489954029 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.060697 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-t9k6q" event={"ID":"15243bb2-b17a-4ad4-b4f9-fbb592883207","Type":"ContainerStarted","Data":"d5fb82bffad14faf813b0391c23c256750cb30c52b075ed6ec775d6379af36c4"} Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.062414 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.062602 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-vwm2l" event={"ID":"95c3adf6-7fac-411c-9f54-9eea69052b94","Type":"ContainerStarted","Data":"0626f2abbab3edcba1ea5c53be3bf8931971b7f4ec65b0271b876ea5f1dcb9af"} Nov 28 13:20:48 crc kubenswrapper[4857]: E1128 13:20:48.062891 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:48.562868723 +0000 UTC m=+140.590243900 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.163968 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:48 crc kubenswrapper[4857]: E1128 13:20:48.164601 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:48.664586226 +0000 UTC m=+140.691961403 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.265164 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:48 crc kubenswrapper[4857]: E1128 13:20:48.265492 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:48.765458774 +0000 UTC m=+140.792833981 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.265552 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:48 crc kubenswrapper[4857]: E1128 13:20:48.266077 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:48.766056391 +0000 UTC m=+140.793431598 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.275244 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgrmg\" (UniqueName: \"kubernetes.io/projected/e4655590-7cc2-4489-8a43-d897b47bdd45-kube-api-access-cgrmg\") pod \"openshift-controller-manager-operator-756b6f6bc6-pkcmd\" (UID: \"e4655590-7cc2-4489-8a43-d897b47bdd45\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pkcmd" Nov 28 13:20:48 crc kubenswrapper[4857]: W1128 13:20:48.283364 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfef72e7c_9edd_4a6f_8648_aaaf65497bb6.slice/crio-82ca41a0cede04bc63a401e5567460ab9c79a54d265d0c91acf3776fb9b7e9db WatchSource:0}: Error finding container 82ca41a0cede04bc63a401e5567460ab9c79a54d265d0c91acf3776fb9b7e9db: Status 404 returned error can't find the container with id 82ca41a0cede04bc63a401e5567460ab9c79a54d265d0c91acf3776fb9b7e9db Nov 28 13:20:48 crc kubenswrapper[4857]: W1128 13:20:48.284218 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0ba2c12_2e58_47ae_af8f_3a877929fee7.slice/crio-62cbee6b75812b3df6ffa76fe68669f881852e41555a16e483c3ae158587b903 WatchSource:0}: Error finding container 62cbee6b75812b3df6ffa76fe68669f881852e41555a16e483c3ae158587b903: Status 404 returned error can't find the container with id 62cbee6b75812b3df6ffa76fe68669f881852e41555a16e483c3ae158587b903 Nov 28 13:20:48 crc kubenswrapper[4857]: W1128 13:20:48.291904 4857 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2240981f_6726_4fd9_b158_eae175371451.slice/crio-e7ac66c47d65cd7d4728c699a944233ee763e9f46e49d5b01871b12a603a67c2 WatchSource:0}: Error finding container e7ac66c47d65cd7d4728c699a944233ee763e9f46e49d5b01871b12a603a67c2: Status 404 returned error can't find the container with id e7ac66c47d65cd7d4728c699a944233ee763e9f46e49d5b01871b12a603a67c2 Nov 28 13:20:48 crc kubenswrapper[4857]: W1128 13:20:48.309157 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod916dbd05_6649_4ef0_9fdf_b2abe4ee3193.slice/crio-ce8d26e55e342fe9219f82830ab59df6c17d4d930f3f009d871db3f64e94af2b WatchSource:0}: Error finding container ce8d26e55e342fe9219f82830ab59df6c17d4d930f3f009d871db3f64e94af2b: Status 404 returned error can't find the container with id ce8d26e55e342fe9219f82830ab59df6c17d4d930f3f009d871db3f64e94af2b Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.367299 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:48 crc kubenswrapper[4857]: E1128 13:20:48.367506 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:48.867475125 +0000 UTC m=+140.894850312 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.367556 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:48 crc kubenswrapper[4857]: E1128 13:20:48.368178 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:48.868166914 +0000 UTC m=+140.895542081 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.468404 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:48 crc kubenswrapper[4857]: E1128 13:20:48.468511 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:48.968491327 +0000 UTC m=+140.995866494 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.469176 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:48 crc kubenswrapper[4857]: E1128 13:20:48.469580 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:48.969570078 +0000 UTC m=+140.996945245 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.474046 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-tcbhr"] Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.494717 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pkcmd" Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.507225 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zpqmp"] Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.570407 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:48 crc kubenswrapper[4857]: E1128 13:20:48.570601 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:49.070554939 +0000 UTC m=+141.097930116 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.570742 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:48 crc kubenswrapper[4857]: E1128 13:20:48.571058 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:49.071043873 +0000 UTC m=+141.098419040 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.671676 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:48 crc kubenswrapper[4857]: E1128 13:20:48.672011 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-28 13:20:49.171985963 +0000 UTC m=+141.199361130 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.724111 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405595-dbc4d"] Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.772353 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-n485x"] Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.773656 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:48 crc kubenswrapper[4857]: E1128 13:20:48.774073 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:49.274054586 +0000 UTC m=+141.301429823 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:48 crc kubenswrapper[4857]: W1128 13:20:48.848458 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod70c93db6_6e53_4870_92f3_e6335deb6936.slice/crio-3c8eb21382da0c5a9d860910e344eb9353cb421313075c58d267a6653fbf3ad3 WatchSource:0}: Error finding container 3c8eb21382da0c5a9d860910e344eb9353cb421313075c58d267a6653fbf3ad3: Status 404 returned error can't find the container with id 3c8eb21382da0c5a9d860910e344eb9353cb421313075c58d267a6653fbf3ad3 Nov 28 13:20:48 crc kubenswrapper[4857]: W1128 13:20:48.860188 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode22d80c8_5706_475b_a385_00c22ad2eaea.slice/crio-f7febd2edfcc5298e543a9427c7a308f305afb9bf5a6f7ac0e492f5d4370e544 WatchSource:0}: Error finding container f7febd2edfcc5298e543a9427c7a308f305afb9bf5a6f7ac0e492f5d4370e544: Status 404 returned error can't find the container with id f7febd2edfcc5298e543a9427c7a308f305afb9bf5a6f7ac0e492f5d4370e544 Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.874522 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:48 crc kubenswrapper[4857]: E1128 13:20:48.874714 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:49.374687937 +0000 UTC m=+141.402063104 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.874964 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:48 crc kubenswrapper[4857]: E1128 13:20:48.875260 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:49.375251983 +0000 UTC m=+141.402627150 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.950164 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-2xzcp"] Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.976864 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:48 crc kubenswrapper[4857]: E1128 13:20:48.977145 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:49.47711766 +0000 UTC m=+141.504492827 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:48 crc kubenswrapper[4857]: I1128 13:20:48.977404 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:48 crc kubenswrapper[4857]: E1128 13:20:48.977809 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:49.477800459 +0000 UTC m=+141.505175626 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.066316 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx" event={"ID":"30934d71-ae7e-491a-933a-f1667b3608e4","Type":"ContainerStarted","Data":"b81aa615ad4190d371ac5c03599170c68e75ce4d7a084f34a986ad8a770da707"} Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.067127 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" event={"ID":"e22d80c8-5706-475b-a385-00c22ad2eaea","Type":"ContainerStarted","Data":"f7febd2edfcc5298e543a9427c7a308f305afb9bf5a6f7ac0e492f5d4370e544"} Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.068036 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" event={"ID":"03986f47-8037-41dd-a995-684a9296a676","Type":"ContainerStarted","Data":"8bef5a483ead5f86aebd85e6e52719d93801e42d92a6dfa70f5aa837643f5c86"} Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.068630 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wlfw4" event={"ID":"916dbd05-6649-4ef0-9fdf-b2abe4ee3193","Type":"ContainerStarted","Data":"ce8d26e55e342fe9219f82830ab59df6c17d4d930f3f009d871db3f64e94af2b"} Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.069404 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-tcbhr" event={"ID":"70c93db6-6e53-4870-92f3-e6335deb6936","Type":"ContainerStarted","Data":"3c8eb21382da0c5a9d860910e344eb9353cb421313075c58d267a6653fbf3ad3"} Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.070116 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-7plbl" 
event={"ID":"fef72e7c-9edd-4a6f-8648-aaaf65497bb6","Type":"ContainerStarted","Data":"82ca41a0cede04bc63a401e5567460ab9c79a54d265d0c91acf3776fb9b7e9db"} Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.071026 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-x8g9t" event={"ID":"7f4befc0-7e37-4e54-ba59-0e4f698980b6","Type":"ContainerStarted","Data":"d3cb11628316036b63d9a8c4104cd7a4d2dec82f3e8748e387835c459490a1b4"} Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.071658 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tnz7" event={"ID":"2240981f-6726-4fd9-b158-eae175371451","Type":"ContainerStarted","Data":"e7ac66c47d65cd7d4728c699a944233ee763e9f46e49d5b01871b12a603a67c2"} Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.072248 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fvj5x" event={"ID":"e846921b-6840-420c-a782-fba505744883","Type":"ContainerStarted","Data":"d447dd24fb10e2991cac5fa12c4efefaa0bf9ab4eed8abb4d8d3ec1b3224a8b9"} Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.072741 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fqxp6" event={"ID":"b0ba2c12-2e58-47ae-af8f-3a877929fee7","Type":"ContainerStarted","Data":"62cbee6b75812b3df6ffa76fe68669f881852e41555a16e483c3ae158587b903"} Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.073632 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-rc8cq" event={"ID":"de8cf09d-8247-4f1f-bce9-01472e9ee181","Type":"ContainerStarted","Data":"02bd4094db6eda2b2930610707d50730065fa65f2f42284cf0bf683c237846a3"} Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.074241 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-pxdz7" event={"ID":"eba66557-699a-4be9-bc8e-fcedf6155f7e","Type":"ContainerStarted","Data":"78811e64d3b1b129349f03c0dd39772a99b94e4cca9ee296d32b3d65c4000153"} Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.078669 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:49 crc kubenswrapper[4857]: E1128 13:20:49.078850 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:49.578830982 +0000 UTC m=+141.606206149 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.078964 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:49 crc kubenswrapper[4857]: E1128 13:20:49.079267 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:49.579256814 +0000 UTC m=+141.606631981 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.179798 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:49 crc kubenswrapper[4857]: E1128 13:20:49.179908 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:49.679869045 +0000 UTC m=+141.707244212 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.180086 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:49 crc kubenswrapper[4857]: E1128 13:20:49.180425 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:49.680417901 +0000 UTC m=+141.707793068 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:49 crc kubenswrapper[4857]: W1128 13:20:49.232126 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf2860ffb_7c3e_488f_af31_bfb8609a67d4.slice/crio-495a98c5746bf76d723550d7dab4971d7eb076a75abaa3f3921787e28aaa52de WatchSource:0}: Error finding container 495a98c5746bf76d723550d7dab4971d7eb076a75abaa3f3921787e28aaa52de: Status 404 returned error can't find the container with id 495a98c5746bf76d723550d7dab4971d7eb076a75abaa3f3921787e28aaa52de Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.285264 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:49 crc kubenswrapper[4857]: E1128 13:20:49.285453 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:49.785414287 +0000 UTC m=+141.812789494 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.285596 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:49 crc kubenswrapper[4857]: E1128 13:20:49.286312 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:49.786297203 +0000 UTC m=+141.813672410 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.387380 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:49 crc kubenswrapper[4857]: E1128 13:20:49.388038 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:49.888023785 +0000 UTC m=+141.915398952 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.420777 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm"] Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.488817 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:49 crc kubenswrapper[4857]: E1128 13:20:49.489143 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:49.98913204 +0000 UTC m=+142.016507207 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.489920 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-8jrs2"] Nov 28 13:20:49 crc kubenswrapper[4857]: W1128 13:20:49.525478 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40c7002a_571a_4c01_bfb7_a6bbf316a615.slice/crio-ad0a7adb15fdf0f231378ee14f01412ce560f7836df63e966a9e439ffa1a0fbf WatchSource:0}: Error finding container ad0a7adb15fdf0f231378ee14f01412ce560f7836df63e966a9e439ffa1a0fbf: Status 404 returned error can't find the container with id ad0a7adb15fdf0f231378ee14f01412ce560f7836df63e966a9e439ffa1a0fbf Nov 28 13:20:49 crc kubenswrapper[4857]: W1128 13:20:49.561149 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6d834626_adda_4075_9196_bbf271b7b785.slice/crio-eb37f8a27d73160be279bb05fd4f666659f307ec5a1ad73396e63cfef9c2a0b8 WatchSource:0}: Error finding container eb37f8a27d73160be279bb05fd4f666659f307ec5a1ad73396e63cfef9c2a0b8: Status 404 returned error can't find the container with id eb37f8a27d73160be279bb05fd4f666659f307ec5a1ad73396e63cfef9c2a0b8 Nov 28 13:20:49 crc kubenswrapper[4857]: W1128 13:20:49.570992 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbfcb44a8_0cab_4938_9f92_0fcc4227a662.slice/crio-b4ee9353b87281112fce5cc9ef116e89e745d44ce369eeaefb0ba26e069ff27a WatchSource:0}: Error finding container 
b4ee9353b87281112fce5cc9ef116e89e745d44ce369eeaefb0ba26e069ff27a: Status 404 returned error can't find the container with id b4ee9353b87281112fce5cc9ef116e89e745d44ce369eeaefb0ba26e069ff27a Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.592405 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:49 crc kubenswrapper[4857]: E1128 13:20:49.592639 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:50.092605363 +0000 UTC m=+142.119980550 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.592911 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:49 crc kubenswrapper[4857]: E1128 13:20:49.593296 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:50.093284603 +0000 UTC m=+142.120659770 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.695244 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:49 crc kubenswrapper[4857]: E1128 13:20:49.695702 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:50.195682714 +0000 UTC m=+142.223057881 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.796570 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:49 crc kubenswrapper[4857]: E1128 13:20:49.797977 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:50.297964743 +0000 UTC m=+142.325339910 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:49 crc kubenswrapper[4857]: W1128 13:20:49.889930 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod18a1ac68_146b_4c80_a763_df4b75e6698d.slice/crio-838c6f17f94895b7cac950182f5b6baea6158a67981f8a4df2ec29ae969659bc WatchSource:0}: Error finding container 838c6f17f94895b7cac950182f5b6baea6158a67981f8a4df2ec29ae969659bc: Status 404 returned error can't find the container with id 838c6f17f94895b7cac950182f5b6baea6158a67981f8a4df2ec29ae969659bc Nov 28 13:20:49 crc kubenswrapper[4857]: I1128 13:20:49.900332 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:49 crc kubenswrapper[4857]: E1128 13:20:49.900732 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:50.400717845 +0000 UTC m=+142.428093012 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.005951 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:50 crc kubenswrapper[4857]: E1128 13:20:50.006303 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:50.506287388 +0000 UTC m=+142.533662555 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.080236 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-rl4qj"] Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.082423 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-zbwhx" event={"ID":"18a1ac68-146b-4c80-a763-df4b75e6698d","Type":"ContainerStarted","Data":"838c6f17f94895b7cac950182f5b6baea6158a67981f8a4df2ec29ae969659bc"} Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.084082 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-fx2d9" event={"ID":"3a15c6a7-1b90-42e1-8e1d-ccdba481e6db","Type":"ContainerStarted","Data":"19b60fbc69a65e2799b1a90f638ffbddee370f88e5e169a443672d834cafcdb5"} Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.087678 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" event={"ID":"6d834626-adda-4075-9196-bbf271b7b785","Type":"ContainerStarted","Data":"eb37f8a27d73160be279bb05fd4f666659f307ec5a1ad73396e63cfef9c2a0b8"} Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.091698 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" event={"ID":"40c7002a-571a-4c01-bfb7-a6bbf316a615","Type":"ContainerStarted","Data":"ad0a7adb15fdf0f231378ee14f01412ce560f7836df63e966a9e439ffa1a0fbf"} Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.096562 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c544t" 
event={"ID":"7e845cce-5c0e-4fad-bd24-6ff321ea3c02","Type":"ContainerStarted","Data":"8e1279e65ec2be9c95ee01995ef5e1e715753eea90bc12c192164743ebb42f1b"} Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.107205 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:50 crc kubenswrapper[4857]: E1128 13:20:50.107519 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:50.607496276 +0000 UTC m=+142.634871453 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.116477 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8jrs2" event={"ID":"bfcb44a8-0cab-4938-9f92-0fcc4227a662","Type":"ContainerStarted","Data":"b4ee9353b87281112fce5cc9ef116e89e745d44ce369eeaefb0ba26e069ff27a"} Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.118980 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-n485x" event={"ID":"fc3f1cee-b032-42c0-9996-8c1b815ad0f6","Type":"ContainerStarted","Data":"e8b823d7848ee406de045a00510d026da020b97e15ceafbf30e05a4de4e60da9"} Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.122278 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wn59p" event={"ID":"d1c0a9ad-6310-4b36-82cf-775aad2a3232","Type":"ContainerStarted","Data":"50d01b626ae097aa06de20b61c4f26acd6cdc177da0a9a6f4802d6e51ade90c3"} Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.123493 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405595-dbc4d" event={"ID":"f2860ffb-7c3e-488f-af31-bfb8609a67d4","Type":"ContainerStarted","Data":"495a98c5746bf76d723550d7dab4971d7eb076a75abaa3f3921787e28aaa52de"} Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.128504 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" event={"ID":"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c","Type":"ContainerStarted","Data":"7770c9f648981f653774f4944176e0ad8d999d5135ba3e05ee61acdb97f02fcc"} Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.209059 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:50 crc kubenswrapper[4857]: E1128 13:20:50.209499 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:50.709482206 +0000 UTC m=+142.736857373 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.312044 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:50 crc kubenswrapper[4857]: E1128 13:20:50.312470 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:50.812447004 +0000 UTC m=+142.839822171 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.414890 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:50 crc kubenswrapper[4857]: E1128 13:20:50.415609 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:50.915596648 +0000 UTC m=+142.942971815 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.516332 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:50 crc kubenswrapper[4857]: E1128 13:20:50.516498 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:51.016472025 +0000 UTC m=+143.043847192 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.516888 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:50 crc kubenswrapper[4857]: E1128 13:20:50.517319 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:51.017308219 +0000 UTC m=+143.044683386 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.592920 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-46bkn"] Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.600888 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-hzw48"] Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.619274 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:50 crc kubenswrapper[4857]: E1128 13:20:50.619870 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:51.119840835 +0000 UTC m=+143.147216002 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.637221 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct"] Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.652395 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-slgpm"] Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.656889 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rvh62"] Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.660373 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9ldq"] Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.664822 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-hvs7l"] Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.664874 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-gqtr2"] Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.665900 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-tz5lk"] Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.717553 4857 kubelet.go:2428] "SyncLoop 
UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-w9chp"] Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.723816 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:50 crc kubenswrapper[4857]: E1128 13:20:50.724352 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:51.224326617 +0000 UTC m=+143.251701784 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.726638 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-ddcsr"] Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.729115 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mfb8j"] Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.730681 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77ck4"] Nov 28 13:20:50 crc kubenswrapper[4857]: W1128 13:20:50.770573 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfaa6eb3a_4dd5_4e99_83da_fdd167db88e7.slice/crio-34b1e55a8c9674813827035d4a5229558b1dfe80afb2c4b1ee23ce57616d5d04 WatchSource:0}: Error finding container 34b1e55a8c9674813827035d4a5229558b1dfe80afb2c4b1ee23ce57616d5d04: Status 404 returned error can't find the container with id 34b1e55a8c9674813827035d4a5229558b1dfe80afb2c4b1ee23ce57616d5d04 Nov 28 13:20:50 crc kubenswrapper[4857]: W1128 13:20:50.774950 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb008b6be_e0fb_4486_9543_1343d458badd.slice/crio-f19abc765e98696de57720921065df4767a765d1acd3cb53513552e618f4a078 WatchSource:0}: Error finding container f19abc765e98696de57720921065df4767a765d1acd3cb53513552e618f4a078: Status 404 returned error can't find the container with id f19abc765e98696de57720921065df4767a765d1acd3cb53513552e618f4a078 Nov 28 13:20:50 crc kubenswrapper[4857]: W1128 13:20:50.777131 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda0ce2b1a_313b_400a_b4e4_2b4b32b1785a.slice/crio-8c977cd2d093ad205effc1eb183a2f4334fae239d925cfb481f3f84a53b0395b WatchSource:0}: Error finding container 8c977cd2d093ad205effc1eb183a2f4334fae239d925cfb481f3f84a53b0395b: Status 404 returned error can't find the container with id 8c977cd2d093ad205effc1eb183a2f4334fae239d925cfb481f3f84a53b0395b 
Nov 28 13:20:50 crc kubenswrapper[4857]: W1128 13:20:50.779391 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb2810366_b7c9_44b1_8b46_afba9c91937f.slice/crio-5f93e337600323d1538c96f92cb144dd748fff206fa4e848864a2827c77194e0 WatchSource:0}: Error finding container 5f93e337600323d1538c96f92cb144dd748fff206fa4e848864a2827c77194e0: Status 404 returned error can't find the container with id 5f93e337600323d1538c96f92cb144dd748fff206fa4e848864a2827c77194e0 Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.811990 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pkcmd"] Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.824737 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:50 crc kubenswrapper[4857]: E1128 13:20:50.834495 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:51.334473072 +0000 UTC m=+143.361848239 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.837450 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-qr8zs"] Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.841285 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-244qv"] Nov 28 13:20:50 crc kubenswrapper[4857]: I1128 13:20:50.935324 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:50 crc kubenswrapper[4857]: E1128 13:20:50.935996 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:51.435962837 +0000 UTC m=+143.463338004 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.036613 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:51 crc kubenswrapper[4857]: E1128 13:20:51.036940 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:51.536888047 +0000 UTC m=+143.564263214 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.139099 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:51 crc kubenswrapper[4857]: E1128 13:20:51.139889 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:51.639857665 +0000 UTC m=+143.667232882 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:51 crc kubenswrapper[4857]: W1128 13:20:51.142212 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode4655590_7cc2_4489_8a43_d897b47bdd45.slice/crio-86c921dfba268c8ff0b8fa11f7651f7817e4c43cee4f0d26bf040e5db0cae08f WatchSource:0}: Error finding container 86c921dfba268c8ff0b8fa11f7651f7817e4c43cee4f0d26bf040e5db0cae08f: Status 404 returned error can't find the container with id 86c921dfba268c8ff0b8fa11f7651f7817e4c43cee4f0d26bf040e5db0cae08f Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.146387 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-ddcsr" event={"ID":"b2810366-b7c9-44b1-8b46-afba9c91937f","Type":"ContainerStarted","Data":"5f93e337600323d1538c96f92cb144dd748fff206fa4e848864a2827c77194e0"} Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.149778 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rl4qj" event={"ID":"ca56a139-a9ad-46bf-a094-435eef021799","Type":"ContainerStarted","Data":"5c0717f2583db4779a259e4f8b2696069ee02582f3774c12ee25e750ad885656"} Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.152173 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9ldq" event={"ID":"98125640-3946-46e2-aa2b-c70d9a04a8a1","Type":"ContainerStarted","Data":"de97d2dc751b8442bae4959d10327c83603eef8953f446d1387bcc06a0759aff"} Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.154345 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-46bkn" event={"ID":"4bab5b07-9cba-4b9f-9c8d-ce540f57347f","Type":"ContainerStarted","Data":"b8f1d5a3604b3edd0e494fedf5890c32c12a6b6dc974e648d3191edf9e2b92ad"} Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.161224 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-slgpm" event={"ID":"219e3c3b-f767-461c-8573-8c2d8da52328","Type":"ContainerStarted","Data":"e081994db8df89fbe2e56b34e07ff42f7db2d7fbe14a52689e2226929aa0281e"} Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.168854 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-hvs7l" event={"ID":"de0c7377-d49e-4651-b681-cddd455fd280","Type":"ContainerStarted","Data":"e5448172a582eb26e94187e41461c1bc71df57c8a51669745f501e815019c809"} Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.171077 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-tz5lk" event={"ID":"faa6eb3a-4dd5-4e99-83da-fdd167db88e7","Type":"ContainerStarted","Data":"34b1e55a8c9674813827035d4a5229558b1dfe80afb2c4b1ee23ce57616d5d04"} Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.172334 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-244qv" event={"ID":"cd7d528d-143a-4093-a83a-510b9767a355","Type":"ContainerStarted","Data":"a444504834bfa9741e9b2c02e3306b38efc5abefaa8729300a7c748756ccb646"} Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.173988 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-gqtr2" event={"ID":"b008b6be-e0fb-4486-9543-1343d458badd","Type":"ContainerStarted","Data":"f19abc765e98696de57720921065df4767a765d1acd3cb53513552e618f4a078"} Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.175012 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mfb8j" event={"ID":"75c61d4f-c7df-4f0f-b643-6bde1458075a","Type":"ContainerStarted","Data":"82cb71d9649c4af05cc8a7f0a0555e0999f2233b98caf5fd6e1cb61f30b733c1"} Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.176086 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" event={"ID":"0eafa688-6c78-44bc-93de-6e300a65a036","Type":"ContainerStarted","Data":"61fb7be051bb07a822e00fd7efb499de80a29fffdb2bedf0c616cbd5b383d6fc"} Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.178126 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct" event={"ID":"a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee","Type":"ContainerStarted","Data":"b72553ee21fcc58fe59126fb0b326e53f075104700835bda5f69b673d01e8adb"} Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.179776 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77ck4" event={"ID":"7c493f67-7a6d-4685-afaf-be33fa220751","Type":"ContainerStarted","Data":"6f6043ede73432ff128177d11137346c4a82ac291923de5ea09e65fdf928a1b4"} Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.180999 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qr8zs" event={"ID":"2010920b-32b8-4a16-9703-99191b3ac0ac","Type":"ContainerStarted","Data":"4872968aaf335dd63eb39cdcd5154bcfcf322a575201cff7ed4bec1469a3e459"} Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.182159 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rvh62" event={"ID":"56f8ea27-0443-4e0a-9dd0-2755b61f49f9","Type":"ContainerStarted","Data":"3c6f070ccad62390907ce9ffac97f941f13fcdfb4128cc13aed97d915098911e"} Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.183905 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-t9k6q" event={"ID":"15243bb2-b17a-4ad4-b4f9-fbb592883207","Type":"ContainerStarted","Data":"a1b8a062c75f56bcad553f2edf2eebd43427b7fc6862877749dcf2243a995083"} Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.185847 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-w9chp" event={"ID":"a0ce2b1a-313b-400a-b4e4-2b4b32b1785a","Type":"ContainerStarted","Data":"8c977cd2d093ad205effc1eb183a2f4334fae239d925cfb481f3f84a53b0395b"} Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.185909 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-rc8cq" Nov 28 13:20:51 crc 
kubenswrapper[4857]: I1128 13:20:51.187739 4857 patch_prober.go:28] interesting pod/downloads-7954f5f757-rc8cq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.5:8080/\": dial tcp 10.217.0.5:8080: connect: connection refused" start-of-body= Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.187847 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-rc8cq" podUID="de8cf09d-8247-4f1f-bce9-01472e9ee181" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.5:8080/\": dial tcp 10.217.0.5:8080: connect: connection refused" Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.201497 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-rc8cq" podStartSLOduration=123.201476906 podStartE2EDuration="2m3.201476906s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:51.20129612 +0000 UTC m=+143.228671287" watchObservedRunningTime="2025-11-28 13:20:51.201476906 +0000 UTC m=+143.228852073" Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.240920 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:51 crc kubenswrapper[4857]: E1128 13:20:51.241173 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:51.741132575 +0000 UTC m=+143.768507742 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.343790 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:51 crc kubenswrapper[4857]: E1128 13:20:51.344294 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:51.844262638 +0000 UTC m=+143.871637835 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:51 crc kubenswrapper[4857]: I1128 13:20:51.445332 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:52 crc kubenswrapper[4857]: E1128 13:20:51.445564 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:51.945500846 +0000 UTC m=+143.972876063 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:52 crc kubenswrapper[4857]: I1128 13:20:51.446854 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:52 crc kubenswrapper[4857]: E1128 13:20:51.447306 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:51.947282468 +0000 UTC m=+143.974657675 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:52 crc kubenswrapper[4857]: I1128 13:20:51.548242 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:52 crc kubenswrapper[4857]: E1128 13:20:51.548491 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:52.048460075 +0000 UTC m=+144.075835252 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:52 crc kubenswrapper[4857]: I1128 13:20:51.548786 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:52 crc kubenswrapper[4857]: E1128 13:20:51.549214 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:52.049198596 +0000 UTC m=+144.076573763 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:52 crc kubenswrapper[4857]: I1128 13:20:52.369112 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:52 crc kubenswrapper[4857]: E1128 13:20:52.369928 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:53.369898195 +0000 UTC m=+145.397273452 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:52 crc kubenswrapper[4857]: I1128 13:20:52.381617 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pkcmd" event={"ID":"e4655590-7cc2-4489-8a43-d897b47bdd45","Type":"ContainerStarted","Data":"86c921dfba268c8ff0b8fa11f7651f7817e4c43cee4f0d26bf040e5db0cae08f"} Nov 28 13:20:52 crc kubenswrapper[4857]: I1128 13:20:52.382377 4857 patch_prober.go:28] interesting pod/downloads-7954f5f757-rc8cq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.5:8080/\": dial tcp 10.217.0.5:8080: connect: connection refused" start-of-body= Nov 28 13:20:52 crc kubenswrapper[4857]: I1128 13:20:52.382407 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-rc8cq" podUID="de8cf09d-8247-4f1f-bce9-01472e9ee181" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.5:8080/\": dial tcp 10.217.0.5:8080: connect: connection refused" Nov 28 13:20:52 crc kubenswrapper[4857]: I1128 13:20:52.405915 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-x8g9t" podStartSLOduration=124.405897859 podStartE2EDuration="2m4.405897859s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:52.403023647 +0000 UTC m=+144.430398824" watchObservedRunningTime="2025-11-28 13:20:52.405897859 +0000 UTC m=+144.433273026" Nov 28 13:20:52 crc kubenswrapper[4857]: I1128 13:20:52.470304 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:52 crc kubenswrapper[4857]: E1128 13:20:52.470895 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:52.970874926 +0000 UTC m=+144.998250103 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:52 crc kubenswrapper[4857]: I1128 13:20:52.571290 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:52 crc kubenswrapper[4857]: E1128 13:20:52.571898 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:53.071881488 +0000 UTC m=+145.099256655 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:52 crc kubenswrapper[4857]: I1128 13:20:52.688236 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:52 crc kubenswrapper[4857]: E1128 13:20:52.689087 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:53.189068875 +0000 UTC m=+145.216444042 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:52 crc kubenswrapper[4857]: I1128 13:20:52.790908 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:52 crc kubenswrapper[4857]: E1128 13:20:52.791436 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:53.291416275 +0000 UTC m=+145.318791442 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:52 crc kubenswrapper[4857]: I1128 13:20:52.892344 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:52 crc kubenswrapper[4857]: E1128 13:20:52.892762 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:53.392727026 +0000 UTC m=+145.420102223 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:52 crc kubenswrapper[4857]: I1128 13:20:52.994452 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:52 crc kubenswrapper[4857]: E1128 13:20:52.995094 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:53.495047926 +0000 UTC m=+145.522423093 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.096373 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:53 crc kubenswrapper[4857]: E1128 13:20:53.096717 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:53.596612864 +0000 UTC m=+145.623988041 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.097232 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:53 crc kubenswrapper[4857]: E1128 13:20:53.097527 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:53.597513379 +0000 UTC m=+145.624888546 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.198823 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:53 crc kubenswrapper[4857]: E1128 13:20:53.198973 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:53.698946184 +0000 UTC m=+145.726321351 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.199772 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:53 crc kubenswrapper[4857]: E1128 13:20:53.200102 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:53.700094967 +0000 UTC m=+145.727470134 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.300610 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:53 crc kubenswrapper[4857]: E1128 13:20:53.301044 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:53.801029266 +0000 UTC m=+145.828404433 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.401861 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:53 crc kubenswrapper[4857]: E1128 13:20:53.402631 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:53.902614185 +0000 UTC m=+145.929989352 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.487553 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" event={"ID":"e22d80c8-5706-475b-a385-00c22ad2eaea","Type":"ContainerStarted","Data":"f7afc25e123b2ca66a05f051f57b6b708689ad93856277fafc46e8ef6856fe89"} Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.489131 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.507019 4857 generic.go:334] "Generic (PLEG): container finished" podID="40c7002a-571a-4c01-bfb7-a6bbf316a615" containerID="3f51b0da1be3e1ae275315652af7f2e02d2c398b1b3932076647ba7976d48eb7" exitCode=0 Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.507647 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" event={"ID":"40c7002a-571a-4c01-bfb7-a6bbf316a615","Type":"ContainerDied","Data":"3f51b0da1be3e1ae275315652af7f2e02d2c398b1b3932076647ba7976d48eb7"} Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.514080 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.515975 4857 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-zpqmp container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: 
connection refused" start-of-body= Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.516069 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" podUID="e22d80c8-5706-475b-a385-00c22ad2eaea" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Nov 28 13:20:53 crc kubenswrapper[4857]: E1128 13:20:53.516091 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:54.016066195 +0000 UTC m=+146.043441362 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.530486 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:53 crc kubenswrapper[4857]: E1128 13:20:53.532343 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:54.032324142 +0000 UTC m=+146.059699309 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.559994 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" podStartSLOduration=125.559974806 podStartE2EDuration="2m5.559974806s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:53.558563036 +0000 UTC m=+145.585938193" watchObservedRunningTime="2025-11-28 13:20:53.559974806 +0000 UTC m=+145.587349973" Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.571003 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-hvs7l" event={"ID":"de0c7377-d49e-4651-b681-cddd455fd280","Type":"ContainerStarted","Data":"29e40b1c06c35757621108cd74d3dbfeef491e75fb84ce3d459be1810d534665"} Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.615366 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx" event={"ID":"30934d71-ae7e-491a-933a-f1667b3608e4","Type":"ContainerStarted","Data":"129fce64d5d4ac6365cba484402d8bec6af17369085f3a5147ca1f1d1462ca71"} Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.616823 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx" Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.631096 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.631993 4857 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-5jcnx container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.632042 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx" podUID="30934d71-ae7e-491a-933a-f1667b3608e4" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" Nov 28 13:20:53 crc kubenswrapper[4857]: E1128 13:20:53.633060 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:54.133035085 +0000 UTC m=+146.160410452 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.650429 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-n485x" event={"ID":"fc3f1cee-b032-42c0-9996-8c1b815ad0f6","Type":"ContainerStarted","Data":"7079873a36567200162b7a3b1c0fa2f58add127e543c0be6db374b106725cd5d"} Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.651606 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx" podStartSLOduration=124.651585998 podStartE2EDuration="2m4.651585998s" podCreationTimestamp="2025-11-28 13:18:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:53.651527776 +0000 UTC m=+145.678902953" watchObservedRunningTime="2025-11-28 13:20:53.651585998 +0000 UTC m=+145.678961165" Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.679495 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rl4qj" event={"ID":"ca56a139-a9ad-46bf-a094-435eef021799","Type":"ContainerStarted","Data":"00a10c1ad23a3a0d2153d49675f7a755ac7822f18ca808ca860fea3540a11561"} Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.700634 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-n485x" podStartSLOduration=125.700616657 podStartE2EDuration="2m5.700616657s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:53.700175004 +0000 UTC m=+145.727550171" watchObservedRunningTime="2025-11-28 13:20:53.700616657 +0000 UTC m=+145.727991824" Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.717969 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c544t" event={"ID":"7e845cce-5c0e-4fad-bd24-6ff321ea3c02","Type":"ContainerStarted","Data":"1db7dd43a64b01ac770bf894eaeea6ad15132df64ae4dec1cc7085165fc8d228"} Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.732279 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:53 crc kubenswrapper[4857]: E1128 13:20:53.734002 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:54.233990206 +0000 UTC m=+146.261365373 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.760540 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct" event={"ID":"a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee","Type":"ContainerStarted","Data":"9319c070ec6371c6637bc10782f8048db5a518fdd7a40dba88f8b851953a4b9b"} Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.761559 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct" Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.780910 4857 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-fm4ct container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:5443/healthz\": dial tcp 10.217.0.34:5443: connect: connection refused" start-of-body= Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.780973 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct" podUID="a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.34:5443/healthz\": dial tcp 10.217.0.34:5443: connect: connection refused" Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.803173 4857 generic.go:334] "Generic (PLEG): container finished" podID="e846921b-6840-420c-a782-fba505744883" containerID="f9d2c499c82e20b6ea2e8edd5ba655e5120d1ac99a03ffd90b59b95749467e7d" exitCode=0 Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.803354 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fvj5x" event={"ID":"e846921b-6840-420c-a782-fba505744883","Type":"ContainerDied","Data":"f9d2c499c82e20b6ea2e8edd5ba655e5120d1ac99a03ffd90b59b95749467e7d"} Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.820530 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-w9chp" event={"ID":"a0ce2b1a-313b-400a-b4e4-2b4b32b1785a","Type":"ContainerStarted","Data":"96a0d49ca40b620cb4e938f13b56e4921c638332d5dcfa0a0e6743b5e0d18a8c"} Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.827723 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9ldq" event={"ID":"98125640-3946-46e2-aa2b-c70d9a04a8a1","Type":"ContainerStarted","Data":"82fdd1e0c66a2b319604cf2165dde7e9fad4321d0286b5f1cb633a05e4da1a71"} Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.833428 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:53 crc kubenswrapper[4857]: E1128 13:20:53.835187 4857 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:54.335158382 +0000 UTC m=+146.362533549 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.846899 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" event={"ID":"03986f47-8037-41dd-a995-684a9296a676","Type":"ContainerStarted","Data":"6c5b4a456c7fc181563a878716478309cdbdd85a14eae203436dfe12435b2279"} Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.875927 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct" podStartSLOduration=124.875901303 podStartE2EDuration="2m4.875901303s" podCreationTimestamp="2025-11-28 13:18:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:53.815018534 +0000 UTC m=+145.842393701" watchObservedRunningTime="2025-11-28 13:20:53.875901303 +0000 UTC m=+145.903276470" Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.914680 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-zbwhx" event={"ID":"18a1ac68-146b-4c80-a763-df4b75e6698d","Type":"ContainerStarted","Data":"eedab5246df2b01e997d7c71627e6b0704b6870d86a7f559b29c0efbb0296f18"} Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.936788 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:53 crc kubenswrapper[4857]: E1128 13:20:53.939988 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:54.439973184 +0000 UTC m=+146.467348351 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.967407 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-ddcsr" event={"ID":"b2810366-b7c9-44b1-8b46-afba9c91937f","Type":"ContainerStarted","Data":"d34e6fc4e95b97c0655020ca5b944a1f159324879846ac138d319746beddb872"} Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.983614 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-zbwhx" podStartSLOduration=125.983589387 podStartE2EDuration="2m5.983589387s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:53.982338851 +0000 UTC m=+146.009714018" watchObservedRunningTime="2025-11-28 13:20:53.983589387 +0000 UTC m=+146.010964554" Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.992440 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-vwm2l" event={"ID":"95c3adf6-7fac-411c-9f54-9eea69052b94","Type":"ContainerStarted","Data":"62e7185ad585c83d7af2c7147650e7ae93e280e6a0e4986c085e4170aa0cca27"} Nov 28 13:20:53 crc kubenswrapper[4857]: I1128 13:20:53.999777 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9ldq" podStartSLOduration=124.99972444 podStartE2EDuration="2m4.99972444s" podCreationTimestamp="2025-11-28 13:18:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:53.942440515 +0000 UTC m=+145.969815692" watchObservedRunningTime="2025-11-28 13:20:53.99972444 +0000 UTC m=+146.027099607" Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.007595 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-ddcsr" podStartSLOduration=10.007571646 podStartE2EDuration="10.007571646s" podCreationTimestamp="2025-11-28 13:20:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:54.005592029 +0000 UTC m=+146.032967196" watchObservedRunningTime="2025-11-28 13:20:54.007571646 +0000 UTC m=+146.034946803" Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.029130 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-7plbl" event={"ID":"fef72e7c-9edd-4a6f-8648-aaaf65497bb6","Type":"ContainerStarted","Data":"20c39de4bb4874b18d0b2f8718f9dda04b695996aceb4d1f7ee6081394d7d0fd"} Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.034497 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-slgpm" event={"ID":"219e3c3b-f767-461c-8573-8c2d8da52328","Type":"ContainerStarted","Data":"e288555ef4f87b44b8426648044741696a509b04a98036097b350acc7f3f2fab"} 
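[editor's note] The pod_startup_latency_tracker entries reduce to plain time arithmetic: with both pull timestamps at the zero time (0001-01-01, i.e. no image pull recorded), podStartE2EDuration is just observedRunningTime minus podCreationTimestamp. Reproducing the router-default figure from the entry above, using Go's default time.Time string layout for parsing:

package main

import (
	"fmt"
	"time"
)

func main() {
	layout := "2006-01-02 15:04:05.999999999 -0700 MST" // Go's default time.Time format, as printed in the log
	created, _ := time.Parse(layout, "2025-11-28 13:18:48 +0000 UTC")           // podCreationTimestamp
	running, _ := time.Parse(layout, "2025-11-28 13:20:53.982338851 +0000 UTC") // watchObservedRunningTime
	fmt.Println(running.Sub(created)) // ≈ 2m5.98s, matching podStartSLOduration=125.983589387
}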
Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.037716 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:54 crc kubenswrapper[4857]: E1128 13:20:54.038090 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:54.538056842 +0000 UTC m=+146.565432009 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.038335 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.044458 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:20:54 crc kubenswrapper[4857]: E1128 13:20:54.050584 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:54.550568981 +0000 UTC m=+146.577944148 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.053031 4857 generic.go:334] "Generic (PLEG): container finished" podID="6d834626-adda-4075-9196-bbf271b7b785" containerID="53af483e961dec22d8bc9e1c06c656868cb113a34139733827eef041a3114e82" exitCode=0 Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.053133 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" event={"ID":"6d834626-adda-4075-9196-bbf271b7b785","Type":"ContainerDied","Data":"53af483e961dec22d8bc9e1c06c656868cb113a34139733827eef041a3114e82"} Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.055224 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-vwm2l" podStartSLOduration=10.055210494 podStartE2EDuration="10.055210494s" podCreationTimestamp="2025-11-28 13:20:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:54.05437496 +0000 UTC m=+146.081750137" watchObservedRunningTime="2025-11-28 13:20:54.055210494 +0000 UTC m=+146.082585661" Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.056182 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77ck4" event={"ID":"7c493f67-7a6d-4685-afaf-be33fa220751","Type":"ContainerStarted","Data":"81a7e399b8a1d2dba1ae7839670f0aa1ccac58954c49c260af0128af95abd739"} Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.071768 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tnz7" event={"ID":"2240981f-6726-4fd9-b158-eae175371451","Type":"ContainerStarted","Data":"af0b7484bc71cf2929ee439f13cfcd4ef32efb270525c0ee5e08899925c6f57c"} Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.072431 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tnz7" Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.078338 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-pxdz7" event={"ID":"eba66557-699a-4be9-bc8e-fcedf6155f7e","Type":"ContainerStarted","Data":"720d2e67fb253d1f39bb15c807444fcd61f23655e6e2abc8e523ac354597bbff"} Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.099113 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tnz7" Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.106013 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405595-dbc4d" event={"ID":"f2860ffb-7c3e-488f-af31-bfb8609a67d4","Type":"ContainerStarted","Data":"5f9e04216adfa76c3ee316738d6e01322a2b2ccc741c8869c2d98b1d89763193"} Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.111463 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wlfw4" event={"ID":"916dbd05-6649-4ef0-9fdf-b2abe4ee3193","Type":"ContainerStarted","Data":"f62f2545c183460f28616aad0f546affee99ddac9dd12b352246314f495c447c"} Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.112020 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wlfw4" Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.116949 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pkcmd" event={"ID":"e4655590-7cc2-4489-8a43-d897b47bdd45","Type":"ContainerStarted","Data":"f7f95b3fb74b3ac1c63f53e2c3a8edbf284be023c7ce811f8a907e82d18e40a4"} Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.150017 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:54 crc kubenswrapper[4857]: E1128 13:20:54.151484 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:54.651457989 +0000 UTC m=+146.678833156 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.166975 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fqxp6" event={"ID":"b0ba2c12-2e58-47ae-af8f-3a877929fee7","Type":"ContainerStarted","Data":"b63fdac415a4301f666f6df0a5686ed9d4cc4a91eeb72a0ea2546cf855203653"} Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.173958 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-7plbl" podStartSLOduration=126.173941745 podStartE2EDuration="2m6.173941745s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:54.127968394 +0000 UTC m=+146.155343561" watchObservedRunningTime="2025-11-28 13:20:54.173941745 +0000 UTC m=+146.201316912" Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.193054 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-tcbhr" event={"ID":"70c93db6-6e53-4870-92f3-e6335deb6936","Type":"ContainerStarted","Data":"09704f00ba7d9ad16a24b030bb0bdfa48563350c57409b96a2c6f27b26b84494"} Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.215927 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8jrs2" 
event={"ID":"bfcb44a8-0cab-4938-9f92-0fcc4227a662","Type":"ContainerStarted","Data":"a38df993a1254475e257370820ad063b750494c58d522ce19a1b7f9f05d4cde4"} Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.219185 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rvh62" event={"ID":"56f8ea27-0443-4e0a-9dd0-2755b61f49f9","Type":"ContainerStarted","Data":"24bbf4504282943ffdcc5acd3097622db0bbd97996be7909e29755dbdbfb07ff"} Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.220928 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rvh62" Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.226644 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-slgpm" podStartSLOduration=125.226620948 podStartE2EDuration="2m5.226620948s" podCreationTimestamp="2025-11-28 13:18:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:54.192725314 +0000 UTC m=+146.220100481" watchObservedRunningTime="2025-11-28 13:20:54.226620948 +0000 UTC m=+146.253996115" Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.227578 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wlfw4" podStartSLOduration=125.227573095 podStartE2EDuration="2m5.227573095s" podCreationTimestamp="2025-11-28 13:18:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:54.225964249 +0000 UTC m=+146.253339416" watchObservedRunningTime="2025-11-28 13:20:54.227573095 +0000 UTC m=+146.254948262" Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.253677 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.254063 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mfb8j" event={"ID":"75c61d4f-c7df-4f0f-b643-6bde1458075a","Type":"ContainerStarted","Data":"d299b64669903e33c17f7c10f3d1981e8e6f4dee4c4fa9ed198a535d421dfca2"} Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.255184 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rvh62" Nov 28 13:20:54 crc kubenswrapper[4857]: E1128 13:20:54.255816 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:54.755799636 +0000 UTC m=+146.783174893 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.282452 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-46bkn" event={"ID":"4bab5b07-9cba-4b9f-9c8d-ce540f57347f","Type":"ContainerStarted","Data":"7dca58c9487a3c346c3b1e32dcfa4a82ef996a21eefa42f73ee1dc3aae830eb1"} Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.285243 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-pkcmd" podStartSLOduration=126.285230812 podStartE2EDuration="2m6.285230812s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:54.282085372 +0000 UTC m=+146.309460539" watchObservedRunningTime="2025-11-28 13:20:54.285230812 +0000 UTC m=+146.312605979" Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.355699 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.356069 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-tz5lk" event={"ID":"faa6eb3a-4dd5-4e99-83da-fdd167db88e7","Type":"ContainerStarted","Data":"0e949a5fa27024ef296470bc20eb0fa33ec895927c35418980dcbf41c3bd0b32"} Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.356100 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-fx2d9" Nov 28 13:20:54 crc kubenswrapper[4857]: E1128 13:20:54.361683 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:54.861651848 +0000 UTC m=+146.889027015 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.361866 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.369567 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.371414 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:54 crc kubenswrapper[4857]: E1128 13:20:54.374259 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:54.874204208 +0000 UTC m=+146.901579385 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.473006 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:54 crc kubenswrapper[4857]: E1128 13:20:54.473227 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:54.973203022 +0000 UTC m=+147.000578209 (durationBeforeRetry 500ms). 
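[editor's note] Each nestedpendingoperations rejection in this stretch follows the same gate: a failed volume operation arms a not-before timestamp ("No retries permitted until ...", with durationBeforeRetry held at 500ms here), and any reconciler pass inside that window is refused without touching the driver at all. A minimal sketch of such a gate — single operation, no volume/pod keying, so far simpler than kubelet's real operation executor:

package main

import (
	"fmt"
	"time"
)

// retryGate embargoes an operation for a backoff window after each
// failure; attempts inside the window are rejected without doing work.
type retryGate struct {
	notBefore time.Time
}

func (g *retryGate) attempt(op func() error, backoff time.Duration) error {
	if now := time.Now(); now.Before(g.notBefore) {
		return fmt.Errorf("no retries permitted until %s", g.notBefore.Format(time.RFC3339Nano))
	}
	if err := op(); err != nil {
		g.notBefore = time.Now().Add(backoff) // durationBeforeRetry
		return err
	}
	return nil
}

func main() {
	g := &retryGate{}
	failing := func() error { return fmt.Errorf("driver not registered yet") }
	fmt.Println(g.attempt(failing, 500*time.Millisecond)) // real attempt fails, arms the gate
	fmt.Println(g.attempt(failing, 500*time.Millisecond)) // rejected: still inside the window
	time.Sleep(600 * time.Millisecond)
	fmt.Println(g.attempt(failing, 500*time.Millisecond)) // window expired, attempted again
}

This is why the identical UnmountVolume/MountVolume error pair recurs at roughly 100ms offsets per volume key: the reconciler keeps re-queuing the work, and the gate converts each premature pass into a cheap rejection until the 500ms window lapses.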
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.473785 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:54 crc kubenswrapper[4857]: E1128 13:20:54.479649 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:54.979627537 +0000 UTC m=+147.007002704 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.576686 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:54 crc kubenswrapper[4857]: E1128 13:20:54.576863 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:55.076832 +0000 UTC m=+147.104207167 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.577479 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:54 crc kubenswrapper[4857]: E1128 13:20:54.578396 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:55.078360704 +0000 UTC m=+147.105735881 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.679244 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:54 crc kubenswrapper[4857]: E1128 13:20:54.679601 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:55.17953714 +0000 UTC m=+147.206912307 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.679823 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:54 crc kubenswrapper[4857]: E1128 13:20:54.680443 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:55.180419896 +0000 UTC m=+147.207795063 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.700991 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-zbwhx" Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.706213 4857 patch_prober.go:28] interesting pod/router-default-5444994796-zbwhx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 13:20:54 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld Nov 28 13:20:54 crc kubenswrapper[4857]: [+]process-running ok Nov 28 13:20:54 crc kubenswrapper[4857]: healthz check failed Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.706328 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zbwhx" podUID="18a1ac68-146b-4c80-a763-df4b75e6698d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.868214 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-fx2d9" Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.872359 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:54 crc kubenswrapper[4857]: E1128 13:20:54.873054 4857 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:55.373024859 +0000 UTC m=+147.400400066 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.896514 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29405595-dbc4d" podStartSLOduration=126.896490814 podStartE2EDuration="2m6.896490814s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:54.895930888 +0000 UTC m=+146.923306065" watchObservedRunningTime="2025-11-28 13:20:54.896490814 +0000 UTC m=+146.923865981" Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.966841 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-77ck4" podStartSLOduration=126.966815244 podStartE2EDuration="2m6.966815244s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:54.92317672 +0000 UTC m=+146.950551907" watchObservedRunningTime="2025-11-28 13:20:54.966815244 +0000 UTC m=+146.994190411" Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.968362 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8tnz7" podStartSLOduration=125.968354958 podStartE2EDuration="2m5.968354958s" podCreationTimestamp="2025-11-28 13:18:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:54.965219748 +0000 UTC m=+146.992594925" watchObservedRunningTime="2025-11-28 13:20:54.968354958 +0000 UTC m=+146.995730125" Nov 28 13:20:54 crc kubenswrapper[4857]: I1128 13:20:54.974545 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:54 crc kubenswrapper[4857]: E1128 13:20:54.975016 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:55.475001889 +0000 UTC m=+147.502377056 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.032057 4857 patch_prober.go:28] interesting pod/downloads-7954f5f757-rc8cq container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.5:8080/\": dial tcp 10.217.0.5:8080: connect: connection refused" start-of-body= Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.032247 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-rc8cq" podUID="de8cf09d-8247-4f1f-bce9-01472e9ee181" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.5:8080/\": dial tcp 10.217.0.5:8080: connect: connection refused" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.032145 4857 patch_prober.go:28] interesting pod/downloads-7954f5f757-rc8cq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.5:8080/\": dial tcp 10.217.0.5:8080: connect: connection refused" start-of-body= Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.032438 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-rc8cq" podUID="de8cf09d-8247-4f1f-bce9-01472e9ee181" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.5:8080/\": dial tcp 10.217.0.5:8080: connect: connection refused" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.041371 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-fqxp6" podStartSLOduration=127.041353556 podStartE2EDuration="2m7.041353556s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:55.00704674 +0000 UTC m=+147.034421907" watchObservedRunningTime="2025-11-28 13:20:55.041353556 +0000 UTC m=+147.068728723" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.041816 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rvh62" podStartSLOduration=126.041805319 podStartE2EDuration="2m6.041805319s" podCreationTimestamp="2025-11-28 13:18:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:55.040856591 +0000 UTC m=+147.068231758" watchObservedRunningTime="2025-11-28 13:20:55.041805319 +0000 UTC m=+147.069180486" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.049164 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-b678s"] Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.050306 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-b678s" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.054310 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.068537 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-b678s"] Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.081568 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.082246 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd1f9d7f-303b-4372-8937-0a7b31e45355-catalog-content\") pod \"certified-operators-b678s\" (UID: \"fd1f9d7f-303b-4372-8937-0a7b31e45355\") " pod="openshift-marketplace/certified-operators-b678s" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.082374 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ph868\" (UniqueName: \"kubernetes.io/projected/fd1f9d7f-303b-4372-8937-0a7b31e45355-kube-api-access-ph868\") pod \"certified-operators-b678s\" (UID: \"fd1f9d7f-303b-4372-8937-0a7b31e45355\") " pod="openshift-marketplace/certified-operators-b678s" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.082487 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd1f9d7f-303b-4372-8937-0a7b31e45355-utilities\") pod \"certified-operators-b678s\" (UID: \"fd1f9d7f-303b-4372-8937-0a7b31e45355\") " pod="openshift-marketplace/certified-operators-b678s" Nov 28 13:20:55 crc kubenswrapper[4857]: E1128 13:20:55.082717 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:55.582685813 +0000 UTC m=+147.610061170 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.089563 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-46bkn" podStartSLOduration=127.08953029 podStartE2EDuration="2m7.08953029s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:55.088183231 +0000 UTC m=+147.115558398" watchObservedRunningTime="2025-11-28 13:20:55.08953029 +0000 UTC m=+147.116905457" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.123329 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-t9k6q" podStartSLOduration=127.12331241 podStartE2EDuration="2m7.12331241s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:55.122189198 +0000 UTC m=+147.149564365" watchObservedRunningTime="2025-11-28 13:20:55.12331241 +0000 UTC m=+147.150687577" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.158278 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-fx2d9" podStartSLOduration=127.158256774 podStartE2EDuration="2m7.158256774s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:55.153644162 +0000 UTC m=+147.181019349" watchObservedRunningTime="2025-11-28 13:20:55.158256774 +0000 UTC m=+147.185631941" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.185946 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.186046 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd1f9d7f-303b-4372-8937-0a7b31e45355-catalog-content\") pod \"certified-operators-b678s\" (UID: \"fd1f9d7f-303b-4372-8937-0a7b31e45355\") " pod="openshift-marketplace/certified-operators-b678s" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.186068 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ph868\" (UniqueName: \"kubernetes.io/projected/fd1f9d7f-303b-4372-8937-0a7b31e45355-kube-api-access-ph868\") pod \"certified-operators-b678s\" (UID: \"fd1f9d7f-303b-4372-8937-0a7b31e45355\") " pod="openshift-marketplace/certified-operators-b678s" Nov 28 13:20:55 crc 
kubenswrapper[4857]: I1128 13:20:55.186094 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd1f9d7f-303b-4372-8937-0a7b31e45355-utilities\") pod \"certified-operators-b678s\" (UID: \"fd1f9d7f-303b-4372-8937-0a7b31e45355\") " pod="openshift-marketplace/certified-operators-b678s" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.186559 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd1f9d7f-303b-4372-8937-0a7b31e45355-utilities\") pod \"certified-operators-b678s\" (UID: \"fd1f9d7f-303b-4372-8937-0a7b31e45355\") " pod="openshift-marketplace/certified-operators-b678s" Nov 28 13:20:55 crc kubenswrapper[4857]: E1128 13:20:55.186807 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:55.686792594 +0000 UTC m=+147.714167761 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.187324 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd1f9d7f-303b-4372-8937-0a7b31e45355-catalog-content\") pod \"certified-operators-b678s\" (UID: \"fd1f9d7f-303b-4372-8937-0a7b31e45355\") " pod="openshift-marketplace/certified-operators-b678s" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.212724 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" podStartSLOduration=126.212685438 podStartE2EDuration="2m6.212685438s" podCreationTimestamp="2025-11-28 13:18:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:55.210296649 +0000 UTC m=+147.237671816" watchObservedRunningTime="2025-11-28 13:20:55.212685438 +0000 UTC m=+147.240060605" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.249870 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-r2fq8"] Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.251263 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-r2fq8" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.261902 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.264677 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-mfb8j" podStartSLOduration=126.264646991 podStartE2EDuration="2m6.264646991s" podCreationTimestamp="2025-11-28 13:18:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:55.249666301 +0000 UTC m=+147.277041468" watchObservedRunningTime="2025-11-28 13:20:55.264646991 +0000 UTC m=+147.292022158" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.281866 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-r2fq8"] Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.285553 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ph868\" (UniqueName: \"kubernetes.io/projected/fd1f9d7f-303b-4372-8937-0a7b31e45355-kube-api-access-ph868\") pod \"certified-operators-b678s\" (UID: \"fd1f9d7f-303b-4372-8937-0a7b31e45355\") " pod="openshift-marketplace/certified-operators-b678s" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.287121 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.287845 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28962db4-abc0-431e-832c-01246a09d048-catalog-content\") pod \"community-operators-r2fq8\" (UID: \"28962db4-abc0-431e-832c-01246a09d048\") " pod="openshift-marketplace/community-operators-r2fq8" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.288037 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5dsks\" (UniqueName: \"kubernetes.io/projected/28962db4-abc0-431e-832c-01246a09d048-kube-api-access-5dsks\") pod \"community-operators-r2fq8\" (UID: \"28962db4-abc0-431e-832c-01246a09d048\") " pod="openshift-marketplace/community-operators-r2fq8" Nov 28 13:20:55 crc kubenswrapper[4857]: E1128 13:20:55.288294 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:55.78826915 +0000 UTC m=+147.815644317 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.289947 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28962db4-abc0-431e-832c-01246a09d048-utilities\") pod \"community-operators-r2fq8\" (UID: \"28962db4-abc0-431e-832c-01246a09d048\") " pod="openshift-marketplace/community-operators-r2fq8" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.361695 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-tz5lk" event={"ID":"faa6eb3a-4dd5-4e99-83da-fdd167db88e7","Type":"ContainerStarted","Data":"c085c25164e9712f595ddd434b9210d171bd08c12cc5be7866e1b27314090e34"} Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.367437 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qr8zs" event={"ID":"2010920b-32b8-4a16-9703-99191b3ac0ac","Type":"ContainerStarted","Data":"a1c2bd0ec467d09b173025ef81f048169eca0bb42240ba6002d76cf73f2f9bd8"} Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.369811 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" event={"ID":"0eafa688-6c78-44bc-93de-6e300a65a036","Type":"ContainerStarted","Data":"52b70e6da85f4ab3ca9c4cc9ce66767add145ed223dd6e0e67e4fb68f65d5dd7"} Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.370346 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.374217 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-tcbhr" event={"ID":"70c93db6-6e53-4870-92f3-e6335deb6936","Type":"ContainerStarted","Data":"af2ea7e3e8dc642738dc369cd0ac1ba1a2c289fd5ba3de2a7279b5da1fb844b8"} Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.374556 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-tcbhr" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.384202 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fvj5x" event={"ID":"e846921b-6840-420c-a782-fba505744883","Type":"ContainerStarted","Data":"1daa5a78ebb5e5d80e91ee8e29b9bc3f427602ecc680ec46ad9a05bf4cacf425"} Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.386977 4857 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-hzw48 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.10:6443/healthz\": dial tcp 10.217.0.10:6443: connect: connection refused" start-of-body= Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.387162 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" podUID="0eafa688-6c78-44bc-93de-6e300a65a036" containerName="oauth-openshift" probeResult="failure" output="Get 
\"https://10.217.0.10:6443/healthz\": dial tcp 10.217.0.10:6443: connect: connection refused" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.391354 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28962db4-abc0-431e-832c-01246a09d048-utilities\") pod \"community-operators-r2fq8\" (UID: \"28962db4-abc0-431e-832c-01246a09d048\") " pod="openshift-marketplace/community-operators-r2fq8" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.391480 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28962db4-abc0-431e-832c-01246a09d048-catalog-content\") pod \"community-operators-r2fq8\" (UID: \"28962db4-abc0-431e-832c-01246a09d048\") " pod="openshift-marketplace/community-operators-r2fq8" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.391580 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.391639 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5dsks\" (UniqueName: \"kubernetes.io/projected/28962db4-abc0-431e-832c-01246a09d048-kube-api-access-5dsks\") pod \"community-operators-r2fq8\" (UID: \"28962db4-abc0-431e-832c-01246a09d048\") " pod="openshift-marketplace/community-operators-r2fq8" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.393572 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28962db4-abc0-431e-832c-01246a09d048-utilities\") pod \"community-operators-r2fq8\" (UID: \"28962db4-abc0-431e-832c-01246a09d048\") " pod="openshift-marketplace/community-operators-r2fq8" Nov 28 13:20:55 crc kubenswrapper[4857]: E1128 13:20:55.395924 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:55.895902792 +0000 UTC m=+147.923277959 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.396385 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28962db4-abc0-431e-832c-01246a09d048-catalog-content\") pod \"community-operators-r2fq8\" (UID: \"28962db4-abc0-431e-832c-01246a09d048\") " pod="openshift-marketplace/community-operators-r2fq8" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.396736 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-b678s" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.399731 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qr8zs" podStartSLOduration=126.399711651 podStartE2EDuration="2m6.399711651s" podCreationTimestamp="2025-11-28 13:18:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:55.396057066 +0000 UTC m=+147.423432233" watchObservedRunningTime="2025-11-28 13:20:55.399711651 +0000 UTC m=+147.427086808" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.404254 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rl4qj" event={"ID":"ca56a139-a9ad-46bf-a094-435eef021799","Type":"ContainerStarted","Data":"7dfa0aa269caee24b62a7ac3b94337b1d8f019ea9493f6927f4da85b061e3a27"} Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.413523 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-w9chp" event={"ID":"a0ce2b1a-313b-400a-b4e4-2b4b32b1785a","Type":"ContainerStarted","Data":"8e36cfabf3417a6f7b7ea745387f754145e63d2d75da38ed974461d875ebdbd7"} Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.425128 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-tcbhr" podStartSLOduration=11.425109761 podStartE2EDuration="11.425109761s" podCreationTimestamp="2025-11-28 13:20:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:55.420679604 +0000 UTC m=+147.448054791" watchObservedRunningTime="2025-11-28 13:20:55.425109761 +0000 UTC m=+147.452484918" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.441279 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-jml4b"] Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.442266 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jml4b" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.444096 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5dsks\" (UniqueName: \"kubernetes.io/projected/28962db4-abc0-431e-832c-01246a09d048-kube-api-access-5dsks\") pod \"community-operators-r2fq8\" (UID: \"28962db4-abc0-431e-832c-01246a09d048\") " pod="openshift-marketplace/community-operators-r2fq8" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.455534 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jml4b"] Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.469071 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" podStartSLOduration=127.469044433 podStartE2EDuration="2m7.469044433s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:55.45708349 +0000 UTC m=+147.484458657" watchObservedRunningTime="2025-11-28 13:20:55.469044433 +0000 UTC m=+147.496419620" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.472213 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wlfw4" event={"ID":"916dbd05-6649-4ef0-9fdf-b2abe4ee3193","Type":"ContainerStarted","Data":"f5dd47434763e5df662bbfb2eed262027cbfa9d413179a14d4999ea964a9c561"} Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.492789 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:55 crc kubenswrapper[4857]: E1128 13:20:55.516834 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:56.016790545 +0000 UTC m=+148.044165712 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.518773 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.519074 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2grhj\" (UniqueName: \"kubernetes.io/projected/d34958af-3c7b-4821-8fa8-af2ec4591af5-kube-api-access-2grhj\") pod \"certified-operators-jml4b\" (UID: \"d34958af-3c7b-4821-8fa8-af2ec4591af5\") " pod="openshift-marketplace/certified-operators-jml4b" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.519366 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d34958af-3c7b-4821-8fa8-af2ec4591af5-catalog-content\") pod \"certified-operators-jml4b\" (UID: \"d34958af-3c7b-4821-8fa8-af2ec4591af5\") " pod="openshift-marketplace/certified-operators-jml4b" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.519649 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d34958af-3c7b-4821-8fa8-af2ec4591af5-utilities\") pod \"certified-operators-jml4b\" (UID: \"d34958af-3c7b-4821-8fa8-af2ec4591af5\") " pod="openshift-marketplace/certified-operators-jml4b" Nov 28 13:20:55 crc kubenswrapper[4857]: E1128 13:20:55.520809 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:56.020745409 +0000 UTC m=+148.048120576 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.560518 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rl4qj" podStartSLOduration=127.56046649 podStartE2EDuration="2m7.56046649s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:55.553789158 +0000 UTC m=+147.581164335" watchObservedRunningTime="2025-11-28 13:20:55.56046649 +0000 UTC m=+147.587841657" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.560912 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c544t" event={"ID":"7e845cce-5c0e-4fad-bd24-6ff321ea3c02","Type":"ContainerStarted","Data":"31314ba44f55f7bdc037c2eef838e29f24c49cc1e7e076dcbd6d877e57c9d7f1"} Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.598812 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c544t" podStartSLOduration=127.598784791 podStartE2EDuration="2m7.598784791s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:55.593010125 +0000 UTC m=+147.620385292" watchObservedRunningTime="2025-11-28 13:20:55.598784791 +0000 UTC m=+147.626159958" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.611495 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-l2hg8"] Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.613890 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-r2fq8" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.624858 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.625085 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d34958af-3c7b-4821-8fa8-af2ec4591af5-utilities\") pod \"certified-operators-jml4b\" (UID: \"d34958af-3c7b-4821-8fa8-af2ec4591af5\") " pod="openshift-marketplace/certified-operators-jml4b" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.625185 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2grhj\" (UniqueName: \"kubernetes.io/projected/d34958af-3c7b-4821-8fa8-af2ec4591af5-kube-api-access-2grhj\") pod \"certified-operators-jml4b\" (UID: \"d34958af-3c7b-4821-8fa8-af2ec4591af5\") " pod="openshift-marketplace/certified-operators-jml4b" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.625219 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d34958af-3c7b-4821-8fa8-af2ec4591af5-catalog-content\") pod \"certified-operators-jml4b\" (UID: \"d34958af-3c7b-4821-8fa8-af2ec4591af5\") " pod="openshift-marketplace/certified-operators-jml4b" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.626296 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d34958af-3c7b-4821-8fa8-af2ec4591af5-utilities\") pod \"certified-operators-jml4b\" (UID: \"d34958af-3c7b-4821-8fa8-af2ec4591af5\") " pod="openshift-marketplace/certified-operators-jml4b" Nov 28 13:20:55 crc kubenswrapper[4857]: E1128 13:20:55.626579 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:56.126542938 +0000 UTC m=+148.153918305 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.628928 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-l2hg8" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.637558 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d34958af-3c7b-4821-8fa8-af2ec4591af5-catalog-content\") pod \"certified-operators-jml4b\" (UID: \"d34958af-3c7b-4821-8fa8-af2ec4591af5\") " pod="openshift-marketplace/certified-operators-jml4b" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.644535 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-pxdz7" event={"ID":"eba66557-699a-4be9-bc8e-fcedf6155f7e","Type":"ContainerStarted","Data":"2224af3b444ba33952160e53b8f8249d9020072784e997050a199e6543d36e75"} Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.664738 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l2hg8"] Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.697438 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2grhj\" (UniqueName: \"kubernetes.io/projected/d34958af-3c7b-4821-8fa8-af2ec4591af5-kube-api-access-2grhj\") pod \"certified-operators-jml4b\" (UID: \"d34958af-3c7b-4821-8fa8-af2ec4591af5\") " pod="openshift-marketplace/certified-operators-jml4b" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.704591 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-gqtr2" event={"ID":"b008b6be-e0fb-4486-9543-1343d458badd","Type":"ContainerStarted","Data":"5ed2c6af60dccc7311b79ff90f464a0bf2176a62d54e0bdce14b7c49d895552c"} Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.704637 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-gqtr2" event={"ID":"b008b6be-e0fb-4486-9543-1343d458badd","Type":"ContainerStarted","Data":"c1b707e271ca488ce79614dbe3ed971104419a7a63f5394f81c1715b5a6275bb"} Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.709869 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-pxdz7" podStartSLOduration=126.709842022 podStartE2EDuration="2m6.709842022s" podCreationTimestamp="2025-11-28 13:18:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:55.702367077 +0000 UTC m=+147.729742264" watchObservedRunningTime="2025-11-28 13:20:55.709842022 +0000 UTC m=+147.737217199" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.721201 4857 patch_prober.go:28] interesting pod/router-default-5444994796-zbwhx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 13:20:55 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld Nov 28 13:20:55 crc kubenswrapper[4857]: [+]process-running ok Nov 28 13:20:55 crc kubenswrapper[4857]: healthz check failed Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.721254 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zbwhx" podUID="18a1ac68-146b-4c80-a763-df4b75e6698d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.729380 4857 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.729762 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.729902 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.730059 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klczj\" (UniqueName: \"kubernetes.io/projected/f6949630-5993-404f-8177-fddca689d6b1-kube-api-access-klczj\") pod \"community-operators-l2hg8\" (UID: \"f6949630-5993-404f-8177-fddca689d6b1\") " pod="openshift-marketplace/community-operators-l2hg8" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.730207 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6949630-5993-404f-8177-fddca689d6b1-catalog-content\") pod \"community-operators-l2hg8\" (UID: \"f6949630-5993-404f-8177-fddca689d6b1\") " pod="openshift-marketplace/community-operators-l2hg8" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.730309 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.730419 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.730561 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6949630-5993-404f-8177-fddca689d6b1-utilities\") pod \"community-operators-l2hg8\" (UID: \"f6949630-5993-404f-8177-fddca689d6b1\") " pod="openshift-marketplace/community-operators-l2hg8" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.729649 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-gqtr2" 
podStartSLOduration=127.72962836 podStartE2EDuration="2m7.72962836s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:55.728381014 +0000 UTC m=+147.755756181" watchObservedRunningTime="2025-11-28 13:20:55.72962836 +0000 UTC m=+147.757003527" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.731920 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wn59p" event={"ID":"d1c0a9ad-6310-4b36-82cf-775aad2a3232","Type":"ContainerStarted","Data":"8e6a266fd1c312adac732bd9a28114de45d4cca5d7845d95eddadc48b0672401"} Nov 28 13:20:55 crc kubenswrapper[4857]: E1128 13:20:55.732580 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:56.232563644 +0000 UTC m=+148.259938991 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.735467 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.737822 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.738916 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.754357 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.765795 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wn59p" podStartSLOduration=128.765737527 podStartE2EDuration="2m8.765737527s" 
podCreationTimestamp="2025-11-28 13:18:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:55.76374422 +0000 UTC m=+147.791119387" watchObservedRunningTime="2025-11-28 13:20:55.765737527 +0000 UTC m=+147.793112694" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.774522 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" event={"ID":"40c7002a-571a-4c01-bfb7-a6bbf316a615","Type":"ContainerStarted","Data":"33eeb6382e032f857e2d39313c3fd1232c375223cb85b4b83c478d73798ff279"} Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.826419 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.832368 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.832545 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6949630-5993-404f-8177-fddca689d6b1-catalog-content\") pod \"community-operators-l2hg8\" (UID: \"f6949630-5993-404f-8177-fddca689d6b1\") " pod="openshift-marketplace/community-operators-l2hg8" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.832634 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6949630-5993-404f-8177-fddca689d6b1-utilities\") pod \"community-operators-l2hg8\" (UID: \"f6949630-5993-404f-8177-fddca689d6b1\") " pod="openshift-marketplace/community-operators-l2hg8" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.832704 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-klczj\" (UniqueName: \"kubernetes.io/projected/f6949630-5993-404f-8177-fddca689d6b1-kube-api-access-klczj\") pod \"community-operators-l2hg8\" (UID: \"f6949630-5993-404f-8177-fddca689d6b1\") " pod="openshift-marketplace/community-operators-l2hg8" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.832832 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.833268 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6949630-5993-404f-8177-fddca689d6b1-catalog-content\") pod \"community-operators-l2hg8\" (UID: \"f6949630-5993-404f-8177-fddca689d6b1\") " pod="openshift-marketplace/community-operators-l2hg8" Nov 28 13:20:55 crc kubenswrapper[4857]: E1128 13:20:55.832935 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:56.332913767 +0000 UTC m=+148.360288934 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.834913 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6949630-5993-404f-8177-fddca689d6b1-utilities\") pod \"community-operators-l2hg8\" (UID: \"f6949630-5993-404f-8177-fddca689d6b1\") " pod="openshift-marketplace/community-operators-l2hg8" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.850471 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8jrs2" event={"ID":"bfcb44a8-0cab-4938-9f92-0fcc4227a662","Type":"ContainerStarted","Data":"458d325d334a3c7600179e3edb2eba2fe0071b27940b33d14a0ab85890decb9d"} Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.851745 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.869468 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jml4b" Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.950152 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:55 crc kubenswrapper[4857]: E1128 13:20:55.952022 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:56.452009649 +0000 UTC m=+148.479384816 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:55 crc kubenswrapper[4857]: I1128 13:20:55.988987 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-hvs7l" event={"ID":"de0c7377-d49e-4651-b681-cddd455fd280","Type":"ContainerStarted","Data":"256ca71619f5dec8d7eaf2456c509088da7a39a210ab34d8de338a78c8427ee5"} Nov 28 13:20:56 crc kubenswrapper[4857]: I1128 13:20:56.028855 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-244qv" event={"ID":"cd7d528d-143a-4093-a83a-510b9767a355","Type":"ContainerStarted","Data":"abd219d812669f17cf70260b9fd676fdc82774f7dfda8ec066fd7197ca0c7c46"} Nov 28 13:20:56 crc kubenswrapper[4857]: I1128 13:20:56.034662 4857 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-5jcnx container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Nov 28 13:20:56 crc kubenswrapper[4857]: I1128 13:20:56.034714 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx" podUID="30934d71-ae7e-491a-933a-f1667b3608e4" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" Nov 28 13:20:56 crc kubenswrapper[4857]: I1128 13:20:56.037338 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-klczj\" (UniqueName: \"kubernetes.io/projected/f6949630-5993-404f-8177-fddca689d6b1-kube-api-access-klczj\") pod \"community-operators-l2hg8\" (UID: \"f6949630-5993-404f-8177-fddca689d6b1\") " pod="openshift-marketplace/community-operators-l2hg8" Nov 28 13:20:56 crc kubenswrapper[4857]: I1128 13:20:56.069139 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:56 crc kubenswrapper[4857]: I1128 13:20:56.069143 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-hvs7l" podStartSLOduration=127.069124434 podStartE2EDuration="2m7.069124434s" podCreationTimestamp="2025-11-28 13:18:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:56.068877747 +0000 UTC m=+148.096252914" watchObservedRunningTime="2025-11-28 13:20:56.069124434 +0000 UTC m=+148.096499601" Nov 28 13:20:56 crc kubenswrapper[4857]: I1128 13:20:56.069479 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8jrs2" podStartSLOduration=127.069473824 
podStartE2EDuration="2m7.069473824s" podCreationTimestamp="2025-11-28 13:18:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:55.949091095 +0000 UTC m=+147.976466262" watchObservedRunningTime="2025-11-28 13:20:56.069473824 +0000 UTC m=+148.096848991" Nov 28 13:20:56 crc kubenswrapper[4857]: E1128 13:20:56.070393 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:56.57037884 +0000 UTC m=+148.597754007 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:56 crc kubenswrapper[4857]: I1128 13:20:56.124370 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" Nov 28 13:20:56 crc kubenswrapper[4857]: I1128 13:20:56.147866 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-244qv" podStartSLOduration=128.147846405 podStartE2EDuration="2m8.147846405s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:56.14417164 +0000 UTC m=+148.171546827" watchObservedRunningTime="2025-11-28 13:20:56.147846405 +0000 UTC m=+148.175221572" Nov 28 13:20:56 crc kubenswrapper[4857]: I1128 13:20:56.190994 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:56 crc kubenswrapper[4857]: E1128 13:20:56.191433 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:56.691419027 +0000 UTC m=+148.718794194 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:56 crc kubenswrapper[4857]: I1128 13:20:56.298331 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:56 crc kubenswrapper[4857]: E1128 13:20:56.298818 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:56.798798922 +0000 UTC m=+148.826174089 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:56 crc kubenswrapper[4857]: I1128 13:20:56.307033 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-b678s"] Nov 28 13:20:56 crc kubenswrapper[4857]: I1128 13:20:56.307079 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l2hg8" Nov 28 13:20:56 crc kubenswrapper[4857]: I1128 13:20:56.399779 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:56 crc kubenswrapper[4857]: E1128 13:20:56.400375 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:56.90034503 +0000 UTC m=+148.927720187 (durationBeforeRetry 500ms). 
Nov 28 13:20:56 crc kubenswrapper[4857]: I1128 13:20:56.674556 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jml4b"]
Nov 28 13:20:56 crc kubenswrapper[4857]: I1128 13:20:56.705295 4857 patch_prober.go:28] interesting pod/router-default-5444994796-zbwhx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 13:20:56 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld
Nov 28 13:20:56 crc kubenswrapper[4857]: [+]process-running ok
Nov 28 13:20:56 crc kubenswrapper[4857]: healthz check failed
Nov 28 13:20:56 crc kubenswrapper[4857]: I1128 13:20:56.705372 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zbwhx" podUID="18a1ac68-146b-4c80-a763-df4b75e6698d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 13:20:56 crc kubenswrapper[4857]: I1128 13:20:56.765121 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l2hg8"]
Nov 28 13:20:56 crc kubenswrapper[4857]: I1128 13:20:56.791000 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-r2fq8"]
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.032377 4857 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-fm4ct container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:5443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.032439 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct" podUID="a1a89d9c-a1cb-4fbd-afb6-bb4d38de84ee" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.34:5443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.045108 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" event={"ID":"6d834626-adda-4075-9196-bbf271b7b785","Type":"ContainerStarted","Data":"fa751dd8c6f880972480666167f81c24b2aedf103c620008e93ef35b7190497a"}
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.049164 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"b6356539affaef711530646e63272a185651156abcebdbfd1ec2b535760a1a76"}
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.057253 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b678s" event={"ID":"fd1f9d7f-303b-4372-8937-0a7b31e45355","Type":"ContainerStarted","Data":"1c386439f77d9ed57c84302df4dd5942884675de400f092ce814d479f72e8cd8"}
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.074264 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-7plbl"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.074305 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-7plbl"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.075848 4857 patch_prober.go:28] interesting pod/console-f9d7485db-7plbl container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body=
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.075904 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-7plbl" podUID="fef72e7c-9edd-4a6f-8648-aaaf65497bb6" containerName="console" probeResult="failure" output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.085566 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"8e5d0378e945a474ca965bdfce29281b1fd486f14fb5dca12854d46fc5442666"}
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.104380 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" podStartSLOduration=128.104360146 podStartE2EDuration="2m8.104360146s" podCreationTimestamp="2025-11-28 13:18:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:57.103596334 +0000 UTC m=+149.130971501" watchObservedRunningTime="2025-11-28 13:20:57.104360146 +0000 UTC m=+149.131735313"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.105704 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"9dab4ac7afeb6535703180fad16106bb3ed9f350156ec58bd0b79c280b27e78d"}
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.109586 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r2fq8" event={"ID":"28962db4-abc0-431e-832c-01246a09d048","Type":"ContainerStarted","Data":"50cf0d8944057c5457a9daaa7a90efe605ae15f7e5bcaee6f673b2f4441eac31"}
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.125416 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l2hg8" event={"ID":"f6949630-5993-404f-8177-fddca689d6b1","Type":"ContainerStarted","Data":"17783e178fab886a5994e153718a8d419f5cc4622bba7d13f2793397a7cfcde7"}
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.140052 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jml4b" event={"ID":"d34958af-3c7b-4821-8fa8-af2ec4591af5","Type":"ContainerStarted","Data":"1eef2ba9f93016fa339b5c1f7c5615002b5970b283af0230977badb6f726a2e7"}
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.144271 4857 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-5jcnx container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body=
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.144320 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx" podUID="30934d71-ae7e-491a-933a-f1667b3608e4" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.192524 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-w9chp" podStartSLOduration=128.192504719 podStartE2EDuration="2m8.192504719s" podCreationTimestamp="2025-11-28 13:18:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:57.190707017 +0000 UTC m=+149.218082184" watchObservedRunningTime="2025-11-28 13:20:57.192504719 +0000 UTC m=+149.219879886"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.197507 4857 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-5jcnx container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body=
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.197558 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx" podUID="30934d71-ae7e-491a-933a-f1667b3608e4" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.198012 4857 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-5jcnx container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body=
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.198094 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx" podUID="30934d71-ae7e-491a-933a-f1667b3608e4" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.283261 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fvj5x" podStartSLOduration=129.283242606 podStartE2EDuration="2m9.283242606s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:57.255000024 +0000 UTC m=+149.282375191" watchObservedRunningTime="2025-11-28 13:20:57.283242606 +0000 UTC m=+149.310617773"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.284523 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-tz5lk" podStartSLOduration=128.284517602 podStartE2EDuration="2m8.284517602s" podCreationTimestamp="2025-11-28 13:18:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:57.28304769 +0000 UTC m=+149.310422857" watchObservedRunningTime="2025-11-28 13:20:57.284517602 +0000 UTC m=+149.311892769"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.420767 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fsv5j"]
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.421718 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fsv5j"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.425276 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.434526 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd3db365-db2a-4a0b-9485-bd38e8da6614-catalog-content\") pod \"redhat-marketplace-fsv5j\" (UID: \"fd3db365-db2a-4a0b-9485-bd38e8da6614\") " pod="openshift-marketplace/redhat-marketplace-fsv5j"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.434558 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wkngh\" (UniqueName: \"kubernetes.io/projected/fd3db365-db2a-4a0b-9485-bd38e8da6614-kube-api-access-wkngh\") pod \"redhat-marketplace-fsv5j\" (UID: \"fd3db365-db2a-4a0b-9485-bd38e8da6614\") " pod="openshift-marketplace/redhat-marketplace-fsv5j"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.434604 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd3db365-db2a-4a0b-9485-bd38e8da6614-utilities\") pod \"redhat-marketplace-fsv5j\" (UID: \"fd3db365-db2a-4a0b-9485-bd38e8da6614\") " pod="openshift-marketplace/redhat-marketplace-fsv5j"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.452584 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fsv5j"]
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.485435 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fm4ct"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.502027 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-hzw48"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.535789 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd3db365-db2a-4a0b-9485-bd38e8da6614-catalog-content\") pod \"redhat-marketplace-fsv5j\" (UID: \"fd3db365-db2a-4a0b-9485-bd38e8da6614\") " pod="openshift-marketplace/redhat-marketplace-fsv5j"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.535832 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wkngh\" (UniqueName: \"kubernetes.io/projected/fd3db365-db2a-4a0b-9485-bd38e8da6614-kube-api-access-wkngh\") pod \"redhat-marketplace-fsv5j\" (UID: \"fd3db365-db2a-4a0b-9485-bd38e8da6614\") " pod="openshift-marketplace/redhat-marketplace-fsv5j"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.535863 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd3db365-db2a-4a0b-9485-bd38e8da6614-utilities\") pod \"redhat-marketplace-fsv5j\" (UID: \"fd3db365-db2a-4a0b-9485-bd38e8da6614\") " pod="openshift-marketplace/redhat-marketplace-fsv5j"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.536684 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd3db365-db2a-4a0b-9485-bd38e8da6614-catalog-content\") pod \"redhat-marketplace-fsv5j\" (UID: \"fd3db365-db2a-4a0b-9485-bd38e8da6614\") " pod="openshift-marketplace/redhat-marketplace-fsv5j"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.536834 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd3db365-db2a-4a0b-9485-bd38e8da6614-utilities\") pod \"redhat-marketplace-fsv5j\" (UID: \"fd3db365-db2a-4a0b-9485-bd38e8da6614\") " pod="openshift-marketplace/redhat-marketplace-fsv5j"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.582716 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wkngh\" (UniqueName: \"kubernetes.io/projected/fd3db365-db2a-4a0b-9485-bd38e8da6614-kube-api-access-wkngh\") pod \"redhat-marketplace-fsv5j\" (UID: \"fd3db365-db2a-4a0b-9485-bd38e8da6614\") " pod="openshift-marketplace/redhat-marketplace-fsv5j"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.679850 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.679892 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.698727 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-zbwhx"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.702709 4857 patch_prober.go:28] interesting pod/router-default-5444994796-zbwhx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 13:20:57 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld
Nov 28 13:20:57 crc kubenswrapper[4857]: [+]process-running ok
Nov 28 13:20:57 crc kubenswrapper[4857]: healthz check failed
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.702794 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zbwhx" podUID="18a1ac68-146b-4c80-a763-df4b75e6698d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.733761 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fsv5j"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.827161 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-v6x5g"]
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.828272 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v6x5g"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.850922 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-v6x5g"]
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.870928 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.871491 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.884915 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.889184 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.919368 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.960404 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ff7e348f-f4c7-417d-adfc-55811357de88-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ff7e348f-f4c7-417d-adfc-55811357de88\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.960675 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ff7e348f-f4c7-417d-adfc-55811357de88-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"ff7e348f-f4c7-417d-adfc-55811357de88\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.960703 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ad31e12-d359-4a81-8e56-5431c271c7ce-catalog-content\") pod \"redhat-marketplace-v6x5g\" (UID: \"5ad31e12-d359-4a81-8e56-5431c271c7ce\") " pod="openshift-marketplace/redhat-marketplace-v6x5g"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.960742 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ad31e12-d359-4a81-8e56-5431c271c7ce-utilities\") pod \"redhat-marketplace-v6x5g\" (UID: \"5ad31e12-d359-4a81-8e56-5431c271c7ce\") " pod="openshift-marketplace/redhat-marketplace-v6x5g"
Nov 28 13:20:57 crc kubenswrapper[4857]: I1128 13:20:57.960802 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrtj5\" (UniqueName: \"kubernetes.io/projected/5ad31e12-d359-4a81-8e56-5431c271c7ce-kube-api-access-wrtj5\") pod \"redhat-marketplace-v6x5g\" (UID: \"5ad31e12-d359-4a81-8e56-5431c271c7ce\") " pod="openshift-marketplace/redhat-marketplace-v6x5g"
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.068457 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrtj5\" (UniqueName: \"kubernetes.io/projected/5ad31e12-d359-4a81-8e56-5431c271c7ce-kube-api-access-wrtj5\") pod \"redhat-marketplace-v6x5g\" (UID: \"5ad31e12-d359-4a81-8e56-5431c271c7ce\") " pod="openshift-marketplace/redhat-marketplace-v6x5g"
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.068482 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ff7e348f-f4c7-417d-adfc-55811357de88-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ff7e348f-f4c7-417d-adfc-55811357de88\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.068518 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ff7e348f-f4c7-417d-adfc-55811357de88-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"ff7e348f-f4c7-417d-adfc-55811357de88\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.068543 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ad31e12-d359-4a81-8e56-5431c271c7ce-catalog-content\") pod \"redhat-marketplace-v6x5g\" (UID: \"5ad31e12-d359-4a81-8e56-5431c271c7ce\") " pod="openshift-marketplace/redhat-marketplace-v6x5g"
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.068574 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ad31e12-d359-4a81-8e56-5431c271c7ce-utilities\") pod \"redhat-marketplace-v6x5g\" (UID: \"5ad31e12-d359-4a81-8e56-5431c271c7ce\") " pod="openshift-marketplace/redhat-marketplace-v6x5g"
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.068991 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ad31e12-d359-4a81-8e56-5431c271c7ce-utilities\") pod \"redhat-marketplace-v6x5g\" (UID: \"5ad31e12-d359-4a81-8e56-5431c271c7ce\") " pod="openshift-marketplace/redhat-marketplace-v6x5g"
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.069507 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ff7e348f-f4c7-417d-adfc-55811357de88-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"ff7e348f-f4c7-417d-adfc-55811357de88\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.069704 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ad31e12-d359-4a81-8e56-5431c271c7ce-catalog-content\") pod \"redhat-marketplace-v6x5g\" (UID: \"5ad31e12-d359-4a81-8e56-5431c271c7ce\") " pod="openshift-marketplace/redhat-marketplace-v6x5g"
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.098434 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrtj5\" (UniqueName: \"kubernetes.io/projected/5ad31e12-d359-4a81-8e56-5431c271c7ce-kube-api-access-wrtj5\") pod \"redhat-marketplace-v6x5g\" (UID: \"5ad31e12-d359-4a81-8e56-5431c271c7ce\") " pod="openshift-marketplace/redhat-marketplace-v6x5g"
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.129371 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ff7e348f-f4c7-417d-adfc-55811357de88-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ff7e348f-f4c7-417d-adfc-55811357de88\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.169483 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 13:20:58 crc kubenswrapper[4857]: E1128 13:20:58.169889 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:58.669870398 +0000 UTC m=+150.697245565 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.228732 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wxvfq"]
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.229927 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wxvfq"
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.236002 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v6x5g"
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.238457 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" event={"ID":"03986f47-8037-41dd-a995-684a9296a676","Type":"ContainerStarted","Data":"cea1e6c25e11f136e9f427c7a1f5fb8c72a216974ba66b9b1707d7686b07504a"}
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.240562 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.259850 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l2hg8" event={"ID":"f6949630-5993-404f-8177-fddca689d6b1","Type":"ContainerStarted","Data":"e4039bf0ffa4b0f8bf7682bfcfedfe526fda9bed9cb2a64706bcd78b12319931"}
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.262867 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" event={"ID":"40c7002a-571a-4c01-bfb7-a6bbf316a615","Type":"ContainerStarted","Data":"acb2f150e131d6353c4c17e01df875ed3229da3a7d647a95496d19c9410d12e6"}
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.263674 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"c5f8723cff94ffe9b8f2e2e1ac1f77a05a39a8373b413b2f3f97a5ef5f27b74c"}
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.264473 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"330f48e0701b9c36211f6c1803b81a9b89e4e9b24cfc35b7ea66ec584b1dee69"}
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.269261 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"200d7335dee7b89323ae51a35dddc3d8bd0fe1378cb61a2a7d216f862f262db0"}
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.270479 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t"
Nov 28 13:20:58 crc kubenswrapper[4857]: E1128 13:20:58.270797 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:58.770786877 +0000 UTC m=+150.798162044 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.275728 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wxvfq"]
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.279154 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r2fq8" event={"ID":"28962db4-abc0-431e-832c-01246a09d048","Type":"ContainerStarted","Data":"9f0cfe541d98be84834e53201766d3a7fbc0c3557230adb129da9ec7b32e7bf0"}
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.288149 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.299990 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b678s" event={"ID":"fd1f9d7f-303b-4372-8937-0a7b31e45355","Type":"ContainerStarted","Data":"1dce511039c1734f457a25431f52922ed7dd42c1cbfd3370a4fd4c59dc384093"}
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.345823 4857 generic.go:334] "Generic (PLEG): container finished" podID="f2860ffb-7c3e-488f-af31-bfb8609a67d4" containerID="5f9e04216adfa76c3ee316738d6e01322a2b2ccc741c8869c2d98b1d89763193" exitCode=0
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.346780 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405595-dbc4d" event={"ID":"f2860ffb-7c3e-488f-af31-bfb8609a67d4","Type":"ContainerDied","Data":"5f9e04216adfa76c3ee316738d6e01322a2b2ccc741c8869c2d98b1d89763193"}
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.363001 4857 generic.go:334] "Generic (PLEG): container finished" podID="d34958af-3c7b-4821-8fa8-af2ec4591af5" containerID="2247befdc773cb92ee44212383dd910c2c8c68767e9526b3232922a8ed757d1e" exitCode=0
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.364103 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jml4b" event={"ID":"d34958af-3c7b-4821-8fa8-af2ec4591af5","Type":"ContainerDied","Data":"2247befdc773cb92ee44212383dd910c2c8c68767e9526b3232922a8ed757d1e"}
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.365017 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-2xzcp" podStartSLOduration=130.365006524 podStartE2EDuration="2m10.365006524s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:20:58.323848342 +0000 UTC m=+150.351223509" watchObservedRunningTime="2025-11-28 13:20:58.365006524 +0000 UTC m=+150.392381691"
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.370732 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.371224 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fsv5j"]
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.371913 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f760076-0358-4b0e-9b50-0d3d05d29a0e-catalog-content\") pod \"redhat-operators-wxvfq\" (UID: \"7f760076-0358-4b0e-9b50-0d3d05d29a0e\") " pod="openshift-marketplace/redhat-operators-wxvfq"
Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.371941 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fj78\" (UniqueName:
\"kubernetes.io/projected/7f760076-0358-4b0e-9b50-0d3d05d29a0e-kube-api-access-2fj78\") pod \"redhat-operators-wxvfq\" (UID: \"7f760076-0358-4b0e-9b50-0d3d05d29a0e\") " pod="openshift-marketplace/redhat-operators-wxvfq" Nov 28 13:20:58 crc kubenswrapper[4857]: E1128 13:20:58.372071 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:58.872056757 +0000 UTC m=+150.899431914 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.372153 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f760076-0358-4b0e-9b50-0d3d05d29a0e-utilities\") pod \"redhat-operators-wxvfq\" (UID: \"7f760076-0358-4b0e-9b50-0d3d05d29a0e\") " pod="openshift-marketplace/redhat-operators-wxvfq" Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.373532 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:58 crc kubenswrapper[4857]: E1128 13:20:58.374810 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:58.874791885 +0000 UTC m=+150.902167142 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.418787 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vwsxc"] Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.420115 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vwsxc" Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.431320 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vwsxc"] Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.504110 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.504866 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7-utilities\") pod \"redhat-operators-vwsxc\" (UID: \"4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7\") " pod="openshift-marketplace/redhat-operators-vwsxc" Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.504931 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f760076-0358-4b0e-9b50-0d3d05d29a0e-catalog-content\") pod \"redhat-operators-wxvfq\" (UID: \"7f760076-0358-4b0e-9b50-0d3d05d29a0e\") " pod="openshift-marketplace/redhat-operators-wxvfq" Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.504978 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fj78\" (UniqueName: \"kubernetes.io/projected/7f760076-0358-4b0e-9b50-0d3d05d29a0e-kube-api-access-2fj78\") pod \"redhat-operators-wxvfq\" (UID: \"7f760076-0358-4b0e-9b50-0d3d05d29a0e\") " pod="openshift-marketplace/redhat-operators-wxvfq" Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.504997 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f760076-0358-4b0e-9b50-0d3d05d29a0e-utilities\") pod \"redhat-operators-wxvfq\" (UID: \"7f760076-0358-4b0e-9b50-0d3d05d29a0e\") " pod="openshift-marketplace/redhat-operators-wxvfq" Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.505032 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7tq8w\" (UniqueName: \"kubernetes.io/projected/4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7-kube-api-access-7tq8w\") pod \"redhat-operators-vwsxc\" (UID: \"4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7\") " pod="openshift-marketplace/redhat-operators-vwsxc" Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.505058 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7-catalog-content\") pod \"redhat-operators-vwsxc\" (UID: \"4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7\") " pod="openshift-marketplace/redhat-operators-vwsxc" Nov 28 13:20:58 crc kubenswrapper[4857]: E1128 13:20:58.506058 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:59.006043236 +0000 UTC m=+151.033418403 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.521430 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f760076-0358-4b0e-9b50-0d3d05d29a0e-catalog-content\") pod \"redhat-operators-wxvfq\" (UID: \"7f760076-0358-4b0e-9b50-0d3d05d29a0e\") " pod="openshift-marketplace/redhat-operators-wxvfq" Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.538187 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f760076-0358-4b0e-9b50-0d3d05d29a0e-utilities\") pod \"redhat-operators-wxvfq\" (UID: \"7f760076-0358-4b0e-9b50-0d3d05d29a0e\") " pod="openshift-marketplace/redhat-operators-wxvfq" Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.571788 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fj78\" (UniqueName: \"kubernetes.io/projected/7f760076-0358-4b0e-9b50-0d3d05d29a0e-kube-api-access-2fj78\") pod \"redhat-operators-wxvfq\" (UID: \"7f760076-0358-4b0e-9b50-0d3d05d29a0e\") " pod="openshift-marketplace/redhat-operators-wxvfq" Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.608385 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7-utilities\") pod \"redhat-operators-vwsxc\" (UID: \"4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7\") " pod="openshift-marketplace/redhat-operators-vwsxc" Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.608442 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7tq8w\" (UniqueName: \"kubernetes.io/projected/4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7-kube-api-access-7tq8w\") pod \"redhat-operators-vwsxc\" (UID: \"4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7\") " pod="openshift-marketplace/redhat-operators-vwsxc" Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.608470 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7-catalog-content\") pod \"redhat-operators-vwsxc\" (UID: \"4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7\") " pod="openshift-marketplace/redhat-operators-vwsxc" Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.608488 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:58 crc kubenswrapper[4857]: E1128 13:20:58.608795 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-28 13:20:59.108775188 +0000 UTC m=+151.136150355 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.609294 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7-catalog-content\") pod \"redhat-operators-vwsxc\" (UID: \"4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7\") " pod="openshift-marketplace/redhat-operators-vwsxc" Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.609522 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7-utilities\") pod \"redhat-operators-vwsxc\" (UID: \"4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7\") " pod="openshift-marketplace/redhat-operators-vwsxc" Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.664873 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7tq8w\" (UniqueName: \"kubernetes.io/projected/4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7-kube-api-access-7tq8w\") pod \"redhat-operators-vwsxc\" (UID: \"4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7\") " pod="openshift-marketplace/redhat-operators-vwsxc" Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.709564 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:58 crc kubenswrapper[4857]: E1128 13:20:58.710155 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:59.21014034 +0000 UTC m=+151.237515507 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.713324 4857 patch_prober.go:28] interesting pod/router-default-5444994796-zbwhx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 13:20:58 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld Nov 28 13:20:58 crc kubenswrapper[4857]: [+]process-running ok Nov 28 13:20:58 crc kubenswrapper[4857]: healthz check failed Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.713363 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zbwhx" podUID="18a1ac68-146b-4c80-a763-df4b75e6698d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.813402 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.813600 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 28 13:20:58 crc kubenswrapper[4857]: E1128 13:20:58.813843 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:59.313832029 +0000 UTC m=+151.341207186 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.825065 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vwsxc" Nov 28 13:20:58 crc kubenswrapper[4857]: W1128 13:20:58.853736 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podff7e348f_f4c7_417d_adfc_55811357de88.slice/crio-032b2fe864512f1d8fc00e6ad5870092441e555961d3575c272d4effa46fe12b WatchSource:0}: Error finding container 032b2fe864512f1d8fc00e6ad5870092441e555961d3575c272d4effa46fe12b: Status 404 returned error can't find the container with id 032b2fe864512f1d8fc00e6ad5870092441e555961d3575c272d4effa46fe12b Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.867451 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wxvfq" Nov 28 13:20:58 crc kubenswrapper[4857]: I1128 13:20:58.914041 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:58 crc kubenswrapper[4857]: E1128 13:20:58.914336 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:59.414321926 +0000 UTC m=+151.441697093 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.003519 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-v6x5g"] Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.016024 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:59 crc kubenswrapper[4857]: E1128 13:20:59.016556 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:59.516544623 +0000 UTC m=+151.543919790 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.119347 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:59 crc kubenswrapper[4857]: E1128 13:20:59.119818 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:59.6198002 +0000 UTC m=+151.647175367 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.120240 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:59 crc kubenswrapper[4857]: E1128 13:20:59.120633 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:59.620623823 +0000 UTC m=+151.647998990 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.134563 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fvj5x" Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.149580 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fvj5x" Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.230761 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:59 crc kubenswrapper[4857]: E1128 13:20:59.231935 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:59.73190157 +0000 UTC m=+151.759276747 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.266957 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vwsxc"] Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.333175 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:59 crc kubenswrapper[4857]: E1128 13:20:59.333558 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:59.833542791 +0000 UTC m=+151.860917958 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.405549 4857 generic.go:334] "Generic (PLEG): container finished" podID="28962db4-abc0-431e-832c-01246a09d048" containerID="9f0cfe541d98be84834e53201766d3a7fbc0c3557230adb129da9ec7b32e7bf0" exitCode=0 Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.405833 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r2fq8" event={"ID":"28962db4-abc0-431e-832c-01246a09d048","Type":"ContainerDied","Data":"9f0cfe541d98be84834e53201766d3a7fbc0c3557230adb129da9ec7b32e7bf0"} Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.416299 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fsv5j" event={"ID":"fd3db365-db2a-4a0b-9485-bd38e8da6614","Type":"ContainerStarted","Data":"e5c9816c462b4de8223ab2a3f5344f939e9212b1e587755d3ec821c80aa3a911"} Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.418642 4857 generic.go:334] "Generic (PLEG): container finished" podID="f6949630-5993-404f-8177-fddca689d6b1" containerID="e4039bf0ffa4b0f8bf7682bfcfedfe526fda9bed9cb2a64706bcd78b12319931" exitCode=0 Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.418686 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l2hg8" event={"ID":"f6949630-5993-404f-8177-fddca689d6b1","Type":"ContainerDied","Data":"e4039bf0ffa4b0f8bf7682bfcfedfe526fda9bed9cb2a64706bcd78b12319931"} Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.419511 4857 generic.go:334] "Generic (PLEG): container finished" podID="fd1f9d7f-303b-4372-8937-0a7b31e45355" 
containerID="1dce511039c1734f457a25431f52922ed7dd42c1cbfd3370a4fd4c59dc384093" exitCode=0 Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.419543 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b678s" event={"ID":"fd1f9d7f-303b-4372-8937-0a7b31e45355","Type":"ContainerDied","Data":"1dce511039c1734f457a25431f52922ed7dd42c1cbfd3370a4fd4c59dc384093"} Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.420318 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"ff7e348f-f4c7-417d-adfc-55811357de88","Type":"ContainerStarted","Data":"032b2fe864512f1d8fc00e6ad5870092441e555961d3575c272d4effa46fe12b"} Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.421202 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v6x5g" event={"ID":"5ad31e12-d359-4a81-8e56-5431c271c7ce","Type":"ContainerStarted","Data":"26871187fa4b729ef2c700ea86ac0fb57b6300235f06c5c4be7c1aa26df55d81"} Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.433806 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:59 crc kubenswrapper[4857]: E1128 13:20:59.434018 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:20:59.933992787 +0000 UTC m=+151.961367954 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.434096 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:59 crc kubenswrapper[4857]: E1128 13:20:59.434492 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:20:59.934484331 +0000 UTC m=+151.961859488 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.535557 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:59 crc kubenswrapper[4857]: E1128 13:20:59.536719 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:21:00.036696757 +0000 UTC m=+152.064071924 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.637983 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:59 crc kubenswrapper[4857]: E1128 13:20:59.638353 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:21:00.138338577 +0000 UTC m=+152.165713744 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.701703 4857 patch_prober.go:28] interesting pod/router-default-5444994796-zbwhx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 13:20:59 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld Nov 28 13:20:59 crc kubenswrapper[4857]: [+]process-running ok Nov 28 13:20:59 crc kubenswrapper[4857]: healthz check failed Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.701766 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zbwhx" podUID="18a1ac68-146b-4c80-a763-df4b75e6698d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.741063 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:59 crc kubenswrapper[4857]: E1128 13:20:59.741627 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:21:00.241599974 +0000 UTC m=+152.268975131 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.741726 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:59 crc kubenswrapper[4857]: E1128 13:20:59.742278 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:21:00.242270504 +0000 UTC m=+152.269645671 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.799717 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wxvfq"] Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.801428 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405595-dbc4d" Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.843083 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qmwcv\" (UniqueName: \"kubernetes.io/projected/f2860ffb-7c3e-488f-af31-bfb8609a67d4-kube-api-access-qmwcv\") pod \"f2860ffb-7c3e-488f-af31-bfb8609a67d4\" (UID: \"f2860ffb-7c3e-488f-af31-bfb8609a67d4\") " Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.843241 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.843314 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f2860ffb-7c3e-488f-af31-bfb8609a67d4-secret-volume\") pod \"f2860ffb-7c3e-488f-af31-bfb8609a67d4\" (UID: \"f2860ffb-7c3e-488f-af31-bfb8609a67d4\") " Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.843377 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f2860ffb-7c3e-488f-af31-bfb8609a67d4-config-volume\") pod \"f2860ffb-7c3e-488f-af31-bfb8609a67d4\" (UID: \"f2860ffb-7c3e-488f-af31-bfb8609a67d4\") " Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.844582 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2860ffb-7c3e-488f-af31-bfb8609a67d4-config-volume" (OuterVolumeSpecName: "config-volume") pod "f2860ffb-7c3e-488f-af31-bfb8609a67d4" (UID: "f2860ffb-7c3e-488f-af31-bfb8609a67d4"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:20:59 crc kubenswrapper[4857]: E1128 13:20:59.844692 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:21:00.344672286 +0000 UTC m=+152.372047453 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.850023 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2860ffb-7c3e-488f-af31-bfb8609a67d4-kube-api-access-qmwcv" (OuterVolumeSpecName: "kube-api-access-qmwcv") pod "f2860ffb-7c3e-488f-af31-bfb8609a67d4" (UID: "f2860ffb-7c3e-488f-af31-bfb8609a67d4"). InnerVolumeSpecName "kube-api-access-qmwcv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.859227 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2860ffb-7c3e-488f-af31-bfb8609a67d4-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f2860ffb-7c3e-488f-af31-bfb8609a67d4" (UID: "f2860ffb-7c3e-488f-af31-bfb8609a67d4"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.918005 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm" Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.922705 4857 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.944539 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.944655 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qmwcv\" (UniqueName: \"kubernetes.io/projected/f2860ffb-7c3e-488f-af31-bfb8609a67d4-kube-api-access-qmwcv\") on node \"crc\" DevicePath \"\"" Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.944674 4857 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f2860ffb-7c3e-488f-af31-bfb8609a67d4-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 13:20:59 crc kubenswrapper[4857]: I1128 13:20:59.944686 4857 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f2860ffb-7c3e-488f-af31-bfb8609a67d4-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 13:20:59 crc kubenswrapper[4857]: E1128 13:20:59.944972 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:21:00.444959067 +0000 UTC m=+152.472334234 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.045393 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:21:00 crc kubenswrapper[4857]: E1128 13:21:00.045881 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:21:00.545864936 +0000 UTC m=+152.573240103 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.147333 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:21:00 crc kubenswrapper[4857]: E1128 13:21:00.147683 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:21:00.647669341 +0000 UTC m=+152.675044508 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.249183 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:21:00 crc kubenswrapper[4857]: E1128 13:21:00.250086 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:21:00.750060773 +0000 UTC m=+152.777435960 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.351075 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:21:00 crc kubenswrapper[4857]: E1128 13:21:00.351474 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:21:00.851457576 +0000 UTC m=+152.878832743 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.426266 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwsxc" event={"ID":"4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7","Type":"ContainerStarted","Data":"1ff10c3647ec28a2712f32a1ccbabc00d3cb32a55e0b94ff66fb76342e4ff3ea"}
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.427321 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wxvfq" event={"ID":"7f760076-0358-4b0e-9b50-0d3d05d29a0e","Type":"ContainerStarted","Data":"325d487328be1cc2bf55a055253658f31a551b877d6704ee85b76eaa925093b6"}
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.429177 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405595-dbc4d"
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.429523 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405595-dbc4d" event={"ID":"f2860ffb-7c3e-488f-af31-bfb8609a67d4","Type":"ContainerDied","Data":"495a98c5746bf76d723550d7dab4971d7eb076a75abaa3f3921787e28aaa52de"}
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.429537 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="495a98c5746bf76d723550d7dab4971d7eb076a75abaa3f3921787e28aaa52de"
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.430453 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.440371 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-j8zwm"
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.452584 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 13:21:00 crc kubenswrapper[4857]: E1128 13:21:00.452775 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:21:00.952734005 +0000 UTC m=+152.980109182 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.453016 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t"
Nov 28 13:21:00 crc kubenswrapper[4857]: E1128 13:21:00.454332 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:21:00.95428408 +0000 UTC m=+152.981659447 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-t448t" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.511144 4857 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-28T13:20:59.922730198Z","Handler":null,"Name":""}
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.521283 4857 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.521604 4857 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.557062 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.563464 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.657720 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t"
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.690903 4857 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.690958 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-t448t"
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.703047 4857 patch_prober.go:28] interesting pod/router-default-5444994796-zbwhx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 13:21:00 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld
Nov 28 13:21:00 crc kubenswrapper[4857]: [+]process-running ok
Nov 28 13:21:00 crc kubenswrapper[4857]: healthz check failed
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.703116 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zbwhx" podUID="18a1ac68-146b-4c80-a763-df4b75e6698d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.798205 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 28 13:21:00 crc kubenswrapper[4857]: E1128 13:21:00.798875 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2860ffb-7c3e-488f-af31-bfb8609a67d4" containerName="collect-profiles"
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.799026 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2860ffb-7c3e-488f-af31-bfb8609a67d4" containerName="collect-profiles"
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.799363 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2860ffb-7c3e-488f-af31-bfb8609a67d4" containerName="collect-profiles"
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.800107 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.802377 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.802574 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.812532 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.860899 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.861000 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.961672 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.961792 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.961884 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 28 13:21:00 crc kubenswrapper[4857]: I1128 13:21:00.986397 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 28 13:21:01 crc kubenswrapper[4857]: I1128 13:21:01.117980 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 28 13:21:01 crc kubenswrapper[4857]: I1128 13:21:01.295061 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 28 13:21:01 crc kubenswrapper[4857]: I1128 13:21:01.436640 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b","Type":"ContainerStarted","Data":"d15533417d20081e10fc39e206ee5c4c2f117715a424267212ad9b9143ad9389"}
Nov 28 13:21:01 crc kubenswrapper[4857]: I1128 13:21:01.701061 4857 patch_prober.go:28] interesting pod/router-default-5444994796-zbwhx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 13:21:01 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld
Nov 28 13:21:01 crc kubenswrapper[4857]: [+]process-running ok
Nov 28 13:21:01 crc kubenswrapper[4857]: healthz check failed
Nov 28 13:21:01 crc kubenswrapper[4857]: I1128 13:21:01.701123 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zbwhx" podUID="18a1ac68-146b-4c80-a763-df4b75e6698d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 13:21:02 crc kubenswrapper[4857]: I1128 13:21:02.194848 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-t448t\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " pod="openshift-image-registry/image-registry-697d97f7c8-t448t"
Nov 28 13:21:02 crc kubenswrapper[4857]: I1128 13:21:02.240780 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-tcbhr"
Nov 28 13:21:02 crc kubenswrapper[4857]: I1128 13:21:02.273955 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-t448t"
Nov 28 13:21:02 crc kubenswrapper[4857]: I1128 13:21:02.320345 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Nov 28 13:21:02 crc kubenswrapper[4857]: I1128 13:21:02.445565 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwsxc" event={"ID":"4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7","Type":"ContainerStarted","Data":"9df2df3deee81e06ae3a9a1c744f1d3181a13bb5af59a2e1f7a3db02c8816fdb"}
Nov 28 13:21:02 crc kubenswrapper[4857]: I1128 13:21:02.447846 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fsv5j" event={"ID":"fd3db365-db2a-4a0b-9485-bd38e8da6614","Type":"ContainerStarted","Data":"95bbb8e50a2ec7da2dbeec300e56fe9a39aea254e94d701d5bcfd6d99cd1cfe0"}
Nov 28 13:21:02 crc kubenswrapper[4857]: I1128 13:21:02.451134 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" event={"ID":"03986f47-8037-41dd-a995-684a9296a676","Type":"ContainerStarted","Data":"b8a18021831cafb0cd7a9072508cab882ff0eae5a478f22bc668f5c1f88eefb7"}
Nov 28 13:21:02 crc kubenswrapper[4857]: I1128 13:21:02.452543 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"ff7e348f-f4c7-417d-adfc-55811357de88","Type":"ContainerStarted","Data":"64f53be02a71dcd5d40594e3dd5eb0d16174f8f09802cf382eacd4a6251fb7b8"}
Nov 28 13:21:02 crc kubenswrapper[4857]: I1128 13:21:02.522005 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-t448t"]
Nov 28 13:21:02 crc kubenswrapper[4857]: I1128 13:21:02.679867 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-2xzcp"
Nov 28 13:21:02 crc kubenswrapper[4857]: I1128 13:21:02.679926 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-2xzcp"
Nov 28 13:21:02 crc kubenswrapper[4857]: I1128 13:21:02.687934 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-2xzcp"
Nov 28 13:21:02 crc kubenswrapper[4857]: I1128 13:21:02.725225 4857 patch_prober.go:28] interesting pod/router-default-5444994796-zbwhx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 13:21:02 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld
Nov 28 13:21:02 crc kubenswrapper[4857]: [+]process-running ok
Nov 28 13:21:02 crc kubenswrapper[4857]: healthz check failed
Nov 28 13:21:02 crc kubenswrapper[4857]: I1128 13:21:02.725302 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zbwhx" podUID="18a1ac68-146b-4c80-a763-df4b75e6698d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 13:21:03 crc kubenswrapper[4857]: I1128 13:21:03.178451 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 13:21:03 crc kubenswrapper[4857]: I1128 13:21:03.178524 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 13:21:03 crc kubenswrapper[4857]: I1128 13:21:03.461404 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wxvfq" event={"ID":"7f760076-0358-4b0e-9b50-0d3d05d29a0e","Type":"ContainerStarted","Data":"13dd25304dd06595a8c15426903d9104adfb8e838e57bdeb028cfa5ee7828793"}
Nov 28 13:21:03 crc kubenswrapper[4857]: I1128 13:21:03.464291 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v6x5g" event={"ID":"5ad31e12-d359-4a81-8e56-5431c271c7ce","Type":"ContainerStarted","Data":"cdb522b401bb2dda1e539cb281379fcc122b5e007880fe113bf09a95a1cd1acc"}
Nov 28 13:21:03 crc kubenswrapper[4857]: I1128 13:21:03.472014 4857 generic.go:334] "Generic (PLEG): container finished" podID="4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7" containerID="9df2df3deee81e06ae3a9a1c744f1d3181a13bb5af59a2e1f7a3db02c8816fdb" exitCode=0
Nov 28 13:21:03 crc kubenswrapper[4857]: I1128 13:21:03.472130 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwsxc" event={"ID":"4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7","Type":"ContainerDied","Data":"9df2df3deee81e06ae3a9a1c744f1d3181a13bb5af59a2e1f7a3db02c8816fdb"}
Nov 28 13:21:03 crc kubenswrapper[4857]: I1128 13:21:03.474941 4857 generic.go:334] "Generic (PLEG): container finished" podID="fd3db365-db2a-4a0b-9485-bd38e8da6614" containerID="95bbb8e50a2ec7da2dbeec300e56fe9a39aea254e94d701d5bcfd6d99cd1cfe0" exitCode=0
Nov 28 13:21:03 crc kubenswrapper[4857]: I1128 13:21:03.475021 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fsv5j" event={"ID":"fd3db365-db2a-4a0b-9485-bd38e8da6614","Type":"ContainerDied","Data":"95bbb8e50a2ec7da2dbeec300e56fe9a39aea254e94d701d5bcfd6d99cd1cfe0"}
Nov 28 13:21:03 crc kubenswrapper[4857]: I1128 13:21:03.477292 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-t448t" event={"ID":"5c497e5f-f362-48a4-bf34-833bfdc6de1b","Type":"ContainerStarted","Data":"6135be6f10d18f221c065ff137bf00efaf2ea1abbae947f2de9ba6964446b79b"}
Nov 28 13:21:03 crc kubenswrapper[4857]: I1128 13:21:03.486383 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-2xzcp"
Nov 28 13:21:03 crc kubenswrapper[4857]: I1128 13:21:03.700435 4857 patch_prober.go:28] interesting pod/router-default-5444994796-zbwhx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 13:21:03 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld
Nov 28 13:21:03 crc kubenswrapper[4857]: [+]process-running ok
Nov 28 13:21:03 crc kubenswrapper[4857]: healthz check failed
Nov 28 13:21:03 crc kubenswrapper[4857]: I1128 13:21:03.700488 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zbwhx" podUID="18a1ac68-146b-4c80-a763-df4b75e6698d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 13:21:04 crc kubenswrapper[4857]: I1128 13:21:04.482885 4857 generic.go:334] "Generic (PLEG): container finished" podID="5ad31e12-d359-4a81-8e56-5431c271c7ce" containerID="cdb522b401bb2dda1e539cb281379fcc122b5e007880fe113bf09a95a1cd1acc" exitCode=0
Nov 28 13:21:04 crc kubenswrapper[4857]: I1128 13:21:04.482955 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v6x5g" event={"ID":"5ad31e12-d359-4a81-8e56-5431c271c7ce","Type":"ContainerDied","Data":"cdb522b401bb2dda1e539cb281379fcc122b5e007880fe113bf09a95a1cd1acc"}
Nov 28 13:21:04 crc kubenswrapper[4857]: I1128 13:21:04.484634 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b","Type":"ContainerStarted","Data":"9ed4010f00abf62ab5f18770845a2ba4ff96cb4bcb07f0b5be575904902e49a5"}
Nov 28 13:21:04 crc kubenswrapper[4857]: I1128 13:21:04.700551 4857 patch_prober.go:28] interesting pod/router-default-5444994796-zbwhx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 13:21:04 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld
Nov 28 13:21:04 crc kubenswrapper[4857]: [+]process-running ok
Nov 28 13:21:04 crc kubenswrapper[4857]: healthz check failed
Nov 28 13:21:04 crc kubenswrapper[4857]: I1128 13:21:04.700621 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zbwhx" podUID="18a1ac68-146b-4c80-a763-df4b75e6698d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 13:21:05 crc kubenswrapper[4857]: I1128 13:21:05.030836 4857 patch_prober.go:28] interesting pod/downloads-7954f5f757-rc8cq container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.5:8080/\": dial tcp 10.217.0.5:8080: connect: connection refused" start-of-body=
Nov 28 13:21:05 crc kubenswrapper[4857]: I1128 13:21:05.030894 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-rc8cq" podUID="de8cf09d-8247-4f1f-bce9-01472e9ee181" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.5:8080/\": dial tcp 10.217.0.5:8080: connect: connection refused"
Nov 28 13:21:05 crc kubenswrapper[4857]: I1128 13:21:05.030850 4857 patch_prober.go:28] interesting pod/downloads-7954f5f757-rc8cq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.5:8080/\": dial tcp 10.217.0.5:8080: connect: connection refused" start-of-body=
Nov 28 13:21:05 crc kubenswrapper[4857]: I1128 13:21:05.031274 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-rc8cq" podUID="de8cf09d-8247-4f1f-bce9-01472e9ee181" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.5:8080/\": dial tcp 10.217.0.5:8080: connect: connection refused"
Nov 28 13:21:05 crc kubenswrapper[4857]: I1128 13:21:05.490177 4857 generic.go:334] "Generic (PLEG): container finished" podID="7f760076-0358-4b0e-9b50-0d3d05d29a0e" containerID="13dd25304dd06595a8c15426903d9104adfb8e838e57bdeb028cfa5ee7828793" exitCode=0
Nov 28 13:21:05 crc kubenswrapper[4857]: I1128 13:21:05.490268 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wxvfq" event={"ID":"7f760076-0358-4b0e-9b50-0d3d05d29a0e","Type":"ContainerDied","Data":"13dd25304dd06595a8c15426903d9104adfb8e838e57bdeb028cfa5ee7828793"}
Nov 28 13:21:05 crc kubenswrapper[4857]: I1128 13:21:05.701528 4857 patch_prober.go:28] interesting pod/router-default-5444994796-zbwhx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 13:21:05 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld
Nov 28 13:21:05 crc kubenswrapper[4857]: [+]process-running ok
Nov 28 13:21:05 crc kubenswrapper[4857]: healthz check failed
Nov 28 13:21:05 crc kubenswrapper[4857]: I1128 13:21:05.701600 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zbwhx" podUID="18a1ac68-146b-4c80-a763-df4b75e6698d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 13:21:06 crc kubenswrapper[4857]: I1128 13:21:06.512267 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=9.512250116 podStartE2EDuration="9.512250116s" podCreationTimestamp="2025-11-28 13:20:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:21:06.507350835 +0000 UTC m=+158.534726002" watchObservedRunningTime="2025-11-28 13:21:06.512250116 +0000 UTC m=+158.539625283"
Nov 28 13:21:06 crc kubenswrapper[4857]: I1128 13:21:06.700891 4857 patch_prober.go:28] interesting pod/router-default-5444994796-zbwhx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 13:21:06 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld
Nov 28 13:21:06 crc kubenswrapper[4857]: [+]process-running ok
Nov 28 13:21:06 crc kubenswrapper[4857]: healthz check failed
Nov 28 13:21:06 crc kubenswrapper[4857]: I1128 13:21:06.700936 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zbwhx" podUID="18a1ac68-146b-4c80-a763-df4b75e6698d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 13:21:07 crc kubenswrapper[4857]: I1128 13:21:07.072948 4857 patch_prober.go:28] interesting pod/console-f9d7485db-7plbl container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body=
Nov 28 13:21:07 crc kubenswrapper[4857]: I1128 13:21:07.073286 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-7plbl" podUID="fef72e7c-9edd-4a6f-8648-aaaf65497bb6" containerName="console" probeResult="failure" output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused"
Nov 28 13:21:07 crc kubenswrapper[4857]: I1128 13:21:07.230681 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx"
Nov 28 13:21:07 crc kubenswrapper[4857]: I1128 13:21:07.701660 4857 patch_prober.go:28] interesting pod/router-default-5444994796-zbwhx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 13:21:07 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld
Nov 28 13:21:07 crc kubenswrapper[4857]: [+]process-running ok
Nov 28 13:21:07 crc kubenswrapper[4857]: healthz check failed
Nov 28 13:21:07 crc kubenswrapper[4857]: I1128 13:21:07.701710 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zbwhx" podUID="18a1ac68-146b-4c80-a763-df4b75e6698d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 13:21:08 crc kubenswrapper[4857]: I1128 13:21:08.700135 4857 patch_prober.go:28] interesting pod/router-default-5444994796-zbwhx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 13:21:08 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld
Nov 28 13:21:08 crc kubenswrapper[4857]: [+]process-running ok
Nov 28 13:21:08 crc kubenswrapper[4857]: healthz check failed
Nov 28 13:21:08 crc kubenswrapper[4857]: I1128 13:21:08.700452 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zbwhx" podUID="18a1ac68-146b-4c80-a763-df4b75e6698d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 13:21:09 crc kubenswrapper[4857]: I1128 13:21:09.523064 4857 generic.go:334] "Generic (PLEG): container finished" podID="ff7e348f-f4c7-417d-adfc-55811357de88" containerID="64f53be02a71dcd5d40594e3dd5eb0d16174f8f09802cf382eacd4a6251fb7b8" exitCode=0
Nov 28 13:21:09 crc kubenswrapper[4857]: I1128 13:21:09.523115 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"ff7e348f-f4c7-417d-adfc-55811357de88","Type":"ContainerDied","Data":"64f53be02a71dcd5d40594e3dd5eb0d16174f8f09802cf382eacd4a6251fb7b8"}
Nov 28 13:21:09 crc kubenswrapper[4857]: I1128 13:21:09.525503 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" event={"ID":"03986f47-8037-41dd-a995-684a9296a676","Type":"ContainerStarted","Data":"b7ed0762f878e5be517e04ed507497d6a26ca8b8deed2ce5c0a69a5d0af9bd65"}
Nov 28 13:21:09 crc kubenswrapper[4857]: I1128 13:21:09.528420 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-t448t" event={"ID":"5c497e5f-f362-48a4-bf34-833bfdc6de1b","Type":"ContainerStarted","Data":"c2a04d8226585d893f88c8b94e8de04f0b48cc41315ff31eccb846ce3d93fe7a"}
Nov 28 13:21:09 crc kubenswrapper[4857]: I1128 13:21:09.531180 4857 generic.go:334] "Generic (PLEG): container finished" podID="762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b" containerID="9ed4010f00abf62ab5f18770845a2ba4ff96cb4bcb07f0b5be575904902e49a5" exitCode=0
Nov 28 13:21:09 crc kubenswrapper[4857]: I1128 13:21:09.531376 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b","Type":"ContainerDied","Data":"9ed4010f00abf62ab5f18770845a2ba4ff96cb4bcb07f0b5be575904902e49a5"}
Nov 28 13:21:09 crc kubenswrapper[4857]: I1128 13:21:09.720995 4857 patch_prober.go:28] interesting pod/router-default-5444994796-zbwhx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 13:21:09 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld
Nov 28 13:21:09 crc kubenswrapper[4857]: [+]process-running ok
Nov 28 13:21:09 crc kubenswrapper[4857]: healthz check failed
Nov 28 13:21:09 crc kubenswrapper[4857]: I1128 13:21:09.721064 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zbwhx" podUID="18a1ac68-146b-4c80-a763-df4b75e6698d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 13:21:10 crc kubenswrapper[4857]: I1128 13:21:10.702477 4857 patch_prober.go:28] interesting pod/router-default-5444994796-zbwhx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 13:21:10 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld
Nov 28 13:21:10 crc kubenswrapper[4857]: [+]process-running ok
Nov 28 13:21:10 crc kubenswrapper[4857]: healthz check failed
Nov 28 13:21:10 crc kubenswrapper[4857]: I1128 13:21:10.702711 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zbwhx" podUID="18a1ac68-146b-4c80-a763-df4b75e6698d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 13:21:11 crc kubenswrapper[4857]: I1128 13:21:11.641355 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-t448t"
Nov 28 13:21:11 crc kubenswrapper[4857]: I1128 13:21:11.691968 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-9g9fd" podStartSLOduration=27.69195147 podStartE2EDuration="27.69195147s" podCreationTimestamp="2025-11-28 13:20:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:21:11.688304145 +0000 UTC m=+163.715679312" watchObservedRunningTime="2025-11-28 13:21:11.69195147 +0000 UTC m=+163.719326627"
Nov 28 13:21:11 crc kubenswrapper[4857]: I1128 13:21:11.692438 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-t448t" podStartSLOduration=143.692432223 podStartE2EDuration="2m23.692432223s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:21:11.668167966 +0000 UTC m=+163.695543133" watchObservedRunningTime="2025-11-28 13:21:11.692432223 +0000 UTC m=+163.719807390"
Nov 28 13:21:11 crc kubenswrapper[4857]: I1128 13:21:11.700859 4857 patch_prober.go:28] interesting pod/router-default-5444994796-zbwhx container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 13:21:11 crc kubenswrapper[4857]: [+]has-synced ok
Nov 28 13:21:11 crc kubenswrapper[4857]: [+]process-running ok
Nov 28 13:21:11 crc kubenswrapper[4857]: healthz check failed
Nov 28 13:21:11 crc kubenswrapper[4857]: I1128 13:21:11.700911 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zbwhx" podUID="18a1ac68-146b-4c80-a763-df4b75e6698d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 13:21:11 crc kubenswrapper[4857]: I1128 13:21:11.772609 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs\") pod \"network-metrics-daemon-jspn8\" (UID: \"9ab9b94a-66a7-4d68-8046-d6d97595330d\") " pod="openshift-multus/network-metrics-daemon-jspn8"
Nov 28 13:21:11 crc kubenswrapper[4857]: I1128 13:21:11.791969 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9ab9b94a-66a7-4d68-8046-d6d97595330d-metrics-certs\") pod \"network-metrics-daemon-jspn8\" (UID: \"9ab9b94a-66a7-4d68-8046-d6d97595330d\") " pod="openshift-multus/network-metrics-daemon-jspn8"
Nov 28 13:21:12 crc kubenswrapper[4857]: I1128 13:21:12.044142 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jspn8"
Nov 28 13:21:12 crc kubenswrapper[4857]: I1128 13:21:12.700897 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-zbwhx"
Nov 28 13:21:12 crc kubenswrapper[4857]: I1128 13:21:12.702654 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-zbwhx"
Nov 28 13:21:15 crc kubenswrapper[4857]: I1128 13:21:15.036648 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-rc8cq"
Nov 28 13:21:17 crc kubenswrapper[4857]: I1128 13:21:17.345307 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-7plbl"
Nov 28 13:21:17 crc kubenswrapper[4857]: I1128 13:21:17.349837 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-7plbl"
Nov 28 13:21:18 crc kubenswrapper[4857]: I1128 13:21:18.843437 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 28 13:21:19 crc kubenswrapper[4857]: I1128 13:21:19.006442 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b-kubelet-dir\") pod \"762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b\" (UID: \"762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b\") "
Nov 28 13:21:19 crc kubenswrapper[4857]: I1128 13:21:19.006532 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b-kube-api-access\") pod \"762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b\" (UID: \"762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b\") "
Nov 28 13:21:19 crc kubenswrapper[4857]: I1128 13:21:19.006583 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b" (UID: "762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 13:21:19 crc kubenswrapper[4857]: I1128 13:21:19.006726 4857 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 28 13:21:19 crc kubenswrapper[4857]: I1128 13:21:19.014893 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b" (UID: "762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:21:19 crc kubenswrapper[4857]: I1128 13:21:19.107365 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 28 13:21:19 crc kubenswrapper[4857]: I1128 13:21:19.700274 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b","Type":"ContainerDied","Data":"d15533417d20081e10fc39e206ee5c4c2f117715a424267212ad9b9143ad9389"}
Nov 28 13:21:19 crc kubenswrapper[4857]: I1128 13:21:19.700370 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d15533417d20081e10fc39e206ee5c4c2f117715a424267212ad9b9143ad9389"
Nov 28 13:21:19 crc kubenswrapper[4857]: I1128 13:21:19.700532 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 28 13:21:27 crc kubenswrapper[4857]: I1128 13:21:27.196068 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wlfw4"
Nov 28 13:21:32 crc kubenswrapper[4857]: I1128 13:21:32.279601 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-t448t"
Nov 28 13:21:32 crc kubenswrapper[4857]: I1128 13:21:32.515884 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 28 13:21:32 crc kubenswrapper[4857]: I1128 13:21:32.594673 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ff7e348f-f4c7-417d-adfc-55811357de88-kubelet-dir\") pod \"ff7e348f-f4c7-417d-adfc-55811357de88\" (UID: \"ff7e348f-f4c7-417d-adfc-55811357de88\") "
Nov 28 13:21:32 crc kubenswrapper[4857]: I1128 13:21:32.594873 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ff7e348f-f4c7-417d-adfc-55811357de88-kube-api-access\") pod \"ff7e348f-f4c7-417d-adfc-55811357de88\" (UID: \"ff7e348f-f4c7-417d-adfc-55811357de88\") "
Nov 28 13:21:32 crc kubenswrapper[4857]: I1128 13:21:32.595091 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ff7e348f-f4c7-417d-adfc-55811357de88-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "ff7e348f-f4c7-417d-adfc-55811357de88" (UID: "ff7e348f-f4c7-417d-adfc-55811357de88"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 13:21:32 crc kubenswrapper[4857]: I1128 13:21:32.600803 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff7e348f-f4c7-417d-adfc-55811357de88-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "ff7e348f-f4c7-417d-adfc-55811357de88" (UID: "ff7e348f-f4c7-417d-adfc-55811357de88"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:21:32 crc kubenswrapper[4857]: I1128 13:21:32.697499 4857 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ff7e348f-f4c7-417d-adfc-55811357de88-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 28 13:21:32 crc kubenswrapper[4857]: I1128 13:21:32.697551 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ff7e348f-f4c7-417d-adfc-55811357de88-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 28 13:21:32 crc kubenswrapper[4857]: I1128 13:21:32.793023 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"ff7e348f-f4c7-417d-adfc-55811357de88","Type":"ContainerDied","Data":"032b2fe864512f1d8fc00e6ad5870092441e555961d3575c272d4effa46fe12b"}
Nov 28 13:21:32 crc kubenswrapper[4857]: I1128 13:21:32.793062 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="032b2fe864512f1d8fc00e6ad5870092441e555961d3575c272d4effa46fe12b"
Nov 28 13:21:32 crc kubenswrapper[4857]: I1128 13:21:32.793091 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 28 13:21:33 crc kubenswrapper[4857]: I1128 13:21:33.178107 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 13:21:33 crc kubenswrapper[4857]: I1128 13:21:33.178236 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 13:21:37 crc kubenswrapper[4857]: I1128 13:21:37.395323 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:21:38 crc kubenswrapper[4857]: I1128 13:21:38.593928 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Nov 28 13:21:38 crc kubenswrapper[4857]: E1128 13:21:38.594398 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b" containerName="pruner"
Nov 28 13:21:38 crc kubenswrapper[4857]: I1128 13:21:38.594410 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b" containerName="pruner"
Nov 28 13:21:38 crc kubenswrapper[4857]: E1128 13:21:38.594418 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff7e348f-f4c7-417d-adfc-55811357de88" containerName="pruner"
Nov 28 13:21:38 crc kubenswrapper[4857]: I1128 13:21:38.594423 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff7e348f-f4c7-417d-adfc-55811357de88" containerName="pruner"
Nov 28 13:21:38 crc kubenswrapper[4857]: I1128 13:21:38.594520 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="762ce37f-0c3e-4524-9e3c-cf9f4a7bfe7b" containerName="pruner"
Nov 28 13:21:38 crc kubenswrapper[4857]: I1128 13:21:38.594530 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff7e348f-f4c7-417d-adfc-55811357de88" containerName="pruner"
Nov 28 13:21:38 crc kubenswrapper[4857]: I1128 13:21:38.594871 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 28 13:21:38 crc kubenswrapper[4857]: I1128 13:21:38.601409 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Nov 28 13:21:38 crc kubenswrapper[4857]: I1128 13:21:38.604212 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Nov 28 13:21:38 crc kubenswrapper[4857]: I1128 13:21:38.604918 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Nov 28 13:21:38 crc kubenswrapper[4857]: I1128 13:21:38.782355 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f1ad4ded-68c6-4b81-a74b-017f2bb594ca-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"f1ad4ded-68c6-4b81-a74b-017f2bb594ca\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 28 13:21:38 crc kubenswrapper[4857]: I1128 13:21:38.782410 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f1ad4ded-68c6-4b81-a74b-017f2bb594ca-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"f1ad4ded-68c6-4b81-a74b-017f2bb594ca\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 28 13:21:38 crc kubenswrapper[4857]: I1128 13:21:38.884061 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f1ad4ded-68c6-4b81-a74b-017f2bb594ca-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"f1ad4ded-68c6-4b81-a74b-017f2bb594ca\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 28 13:21:38 crc kubenswrapper[4857]: I1128 13:21:38.884134 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f1ad4ded-68c6-4b81-a74b-017f2bb594ca-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"f1ad4ded-68c6-4b81-a74b-017f2bb594ca\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 28 13:21:38 crc kubenswrapper[4857]: I1128 13:21:38.884414 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f1ad4ded-68c6-4b81-a74b-017f2bb594ca-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"f1ad4ded-68c6-4b81-a74b-017f2bb594ca\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 28 13:21:38 crc kubenswrapper[4857]: I1128 13:21:38.903110 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f1ad4ded-68c6-4b81-a74b-017f2bb594ca-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"f1ad4ded-68c6-4b81-a74b-017f2bb594ca\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 28 13:21:38 crc kubenswrapper[4857]: I1128 13:21:38.977659 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 28 13:21:43 crc kubenswrapper[4857]: I1128 13:21:43.201438 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Nov 28 13:21:43 crc kubenswrapper[4857]: I1128 13:21:43.202688 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 28 13:21:43 crc kubenswrapper[4857]: I1128 13:21:43.213428 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Nov 28 13:21:43 crc kubenswrapper[4857]: I1128 13:21:43.385716 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/2a2aed81-340c-4ac4-993e-409deefce9b8-var-lock\") pod \"installer-9-crc\" (UID: \"2a2aed81-340c-4ac4-993e-409deefce9b8\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 28 13:21:43 crc kubenswrapper[4857]: I1128 13:21:43.385818 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2a2aed81-340c-4ac4-993e-409deefce9b8-kube-api-access\") pod \"installer-9-crc\" (UID: \"2a2aed81-340c-4ac4-993e-409deefce9b8\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 28 13:21:43 crc kubenswrapper[4857]: I1128 13:21:43.385860 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2a2aed81-340c-4ac4-993e-409deefce9b8-kubelet-dir\") pod \"installer-9-crc\" (UID: \"2a2aed81-340c-4ac4-993e-409deefce9b8\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 28 13:21:43 crc kubenswrapper[4857]: I1128 13:21:43.488533 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2a2aed81-340c-4ac4-993e-409deefce9b8-kube-api-access\") pod \"installer-9-crc\" (UID: \"2a2aed81-340c-4ac4-993e-409deefce9b8\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 28 13:21:43 crc kubenswrapper[4857]: I1128 13:21:43.488586 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2a2aed81-340c-4ac4-993e-409deefce9b8-kubelet-dir\") pod \"installer-9-crc\" (UID: \"2a2aed81-340c-4ac4-993e-409deefce9b8\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 28 13:21:43 crc kubenswrapper[4857]: I1128 13:21:43.488638 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/2a2aed81-340c-4ac4-993e-409deefce9b8-var-lock\") pod \"installer-9-crc\" (UID: \"2a2aed81-340c-4ac4-993e-409deefce9b8\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 28 13:21:43 crc kubenswrapper[4857]: I1128 13:21:43.488694 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/2a2aed81-340c-4ac4-993e-409deefce9b8-var-lock\") pod \"installer-9-crc\" (UID: \"2a2aed81-340c-4ac4-993e-409deefce9b8\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 28 13:21:43 crc kubenswrapper[4857]: I1128 13:21:43.489007 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2a2aed81-340c-4ac4-993e-409deefce9b8-kubelet-dir\") pod \"installer-9-crc\" (UID: \"2a2aed81-340c-4ac4-993e-409deefce9b8\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 28 13:21:43 crc kubenswrapper[4857]: I1128 13:21:43.507002 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2a2aed81-340c-4ac4-993e-409deefce9b8-kube-api-access\") pod \"installer-9-crc\" (UID: \"2a2aed81-340c-4ac4-993e-409deefce9b8\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 28 13:21:43 crc kubenswrapper[4857]: I1128 13:21:43.531537 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 28 13:21:47 crc kubenswrapper[4857]: E1128 13:21:47.132406 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Nov 28 13:21:47 crc kubenswrapper[4857]: E1128 13:21:47.133094 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5dsks,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-r2fq8_openshift-marketplace(28962db4-abc0-431e-832c-01246a09d048): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 28 13:21:47 crc kubenswrapper[4857]: E1128 13:21:47.134413 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-r2fq8" podUID="28962db4-abc0-431e-832c-01246a09d048"
Nov 28 13:21:48 crc kubenswrapper[4857]: E1128 13:21:48.604234 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Nov 28 13:21:48 crc kubenswrapper[4857]: E1128 13:21:48.605006 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-klczj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-l2hg8_openshift-marketplace(f6949630-5993-404f-8177-fddca689d6b1): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 28 13:21:48 crc kubenswrapper[4857]: E1128 13:21:48.606185 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-l2hg8" podUID="f6949630-5993-404f-8177-fddca689d6b1"
Nov 28 13:21:48 crc kubenswrapper[4857]: E1128 13:21:48.625695 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Nov 28 13:21:48 crc kubenswrapper[4857]: E1128 13:21:48.625851 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2grhj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-jml4b_openshift-marketplace(d34958af-3c7b-4821-8fa8-af2ec4591af5): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 28 13:21:48 crc kubenswrapper[4857]: E1128 13:21:48.627053 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-jml4b" podUID="d34958af-3c7b-4821-8fa8-af2ec4591af5"
Nov 28 13:21:48 crc kubenswrapper[4857]: E1128 13:21:48.646087 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Nov 28 13:21:48 crc kubenswrapper[4857]: E1128 13:21:48.646723 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ph868,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-b678s_openshift-marketplace(fd1f9d7f-303b-4372-8937-0a7b31e45355): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 28 13:21:48 crc kubenswrapper[4857]: E1128 13:21:48.650830 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-b678s" podUID="fd1f9d7f-303b-4372-8937-0a7b31e45355"
Nov 28 13:21:48 crc kubenswrapper[4857]: I1128 13:21:48.976271 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-jspn8"]
Nov 28 13:21:50 crc kubenswrapper[4857]: E1128 13:21:50.894128 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-jml4b" podUID="d34958af-3c7b-4821-8fa8-af2ec4591af5"
Nov 28 13:21:50 crc kubenswrapper[4857]: E1128 13:21:50.905320 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Nov 28 13:21:50 crc kubenswrapper[4857]: E1128 13:21:50.905479 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wkngh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-fsv5j_openshift-marketplace(fd3db365-db2a-4a0b-9485-bd38e8da6614): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 28 13:21:50 crc kubenswrapper[4857]: E1128 13:21:50.906682 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-fsv5j" podUID="fd3db365-db2a-4a0b-9485-bd38e8da6614"
Nov 28 13:21:55 crc kubenswrapper[4857]: W1128 13:21:55.891853 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9ab9b94a_66a7_4d68_8046_d6d97595330d.slice/crio-2f2f41ed57059165ede8952d4f0b06984521cd5c59618f968a5bd27c301d99b9 WatchSource:0}: Error finding container 2f2f41ed57059165ede8952d4f0b06984521cd5c59618f968a5bd27c301d99b9: Status 404 returned error can't find the container with id 2f2f41ed57059165ede8952d4f0b06984521cd5c59618f968a5bd27c301d99b9
Nov 28 13:21:55 crc kubenswrapper[4857]: E1128 13:21:55.966061 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Nov 28 13:21:55 crc kubenswrapper[4857]: E1128 13:21:55.966196 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7tq8w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-vwsxc_openshift-marketplace(4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 28 13:21:55 crc kubenswrapper[4857]: E1128 13:21:55.967377 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-vwsxc" podUID="4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7"
Nov 28 13:21:55 crc kubenswrapper[4857]: I1128 13:21:55.967776 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-jspn8" event={"ID":"9ab9b94a-66a7-4d68-8046-d6d97595330d","Type":"ContainerStarted","Data":"2f2f41ed57059165ede8952d4f0b06984521cd5c59618f968a5bd27c301d99b9"}
Nov 28 13:22:03 crc kubenswrapper[4857]: I1128 13:22:03.178569 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 13:22:03 crc kubenswrapper[4857]: I1128 13:22:03.179092 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 13:22:03 crc kubenswrapper[4857]: I1128 13:22:03.179149 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jdgls"
Nov 28 13:22:03 crc kubenswrapper[4857]: I1128 13:22:03.179710 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013"}
pod="openshift-machine-config-operator/machine-config-daemon-jdgls" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 13:22:03 crc kubenswrapper[4857]: I1128 13:22:03.179869 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" containerID="cri-o://ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013" gracePeriod=600 Nov 28 13:22:04 crc kubenswrapper[4857]: I1128 13:22:04.007319 4857 generic.go:334] "Generic (PLEG): container finished" podID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerID="ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013" exitCode=0 Nov 28 13:22:04 crc kubenswrapper[4857]: I1128 13:22:04.007840 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" event={"ID":"aba2e99a-c0de-4ae5-b347-de1565fd9d68","Type":"ContainerDied","Data":"ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013"} Nov 28 13:22:04 crc kubenswrapper[4857]: I1128 13:22:04.227162 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 13:22:04 crc kubenswrapper[4857]: I1128 13:22:04.241585 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 13:22:05 crc kubenswrapper[4857]: I1128 13:22:05.015393 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"2a2aed81-340c-4ac4-993e-409deefce9b8","Type":"ContainerStarted","Data":"eb0d0cf8d029231ae0adfda8313dfe6c0db036480acbe62ef0f895dc1018a549"} Nov 28 13:22:05 crc kubenswrapper[4857]: I1128 13:22:05.020441 4857 generic.go:334] "Generic (PLEG): container finished" podID="5ad31e12-d359-4a81-8e56-5431c271c7ce" containerID="af44d88a1ab662afc0e537f2e70fb2550f70303380ce0aa5e5ef88af7f8efc34" exitCode=0 Nov 28 13:22:05 crc kubenswrapper[4857]: I1128 13:22:05.020533 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v6x5g" event={"ID":"5ad31e12-d359-4a81-8e56-5431c271c7ce","Type":"ContainerDied","Data":"af44d88a1ab662afc0e537f2e70fb2550f70303380ce0aa5e5ef88af7f8efc34"} Nov 28 13:22:05 crc kubenswrapper[4857]: I1128 13:22:05.042828 4857 generic.go:334] "Generic (PLEG): container finished" podID="fd3db365-db2a-4a0b-9485-bd38e8da6614" containerID="46e4b361d60e3ba86fb16195bc7b10cf3436e7968550aa498727abdcbc1e2f19" exitCode=0 Nov 28 13:22:05 crc kubenswrapper[4857]: I1128 13:22:05.042981 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fsv5j" event={"ID":"fd3db365-db2a-4a0b-9485-bd38e8da6614","Type":"ContainerDied","Data":"46e4b361d60e3ba86fb16195bc7b10cf3436e7968550aa498727abdcbc1e2f19"} Nov 28 13:22:05 crc kubenswrapper[4857]: I1128 13:22:05.047685 4857 generic.go:334] "Generic (PLEG): container finished" podID="7f760076-0358-4b0e-9b50-0d3d05d29a0e" containerID="c130f9082d669e254909d53a0287d9ad2b78c4b457e788a5df983dea6c2d813d" exitCode=0 Nov 28 13:22:05 crc kubenswrapper[4857]: I1128 13:22:05.047740 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wxvfq" event={"ID":"7f760076-0358-4b0e-9b50-0d3d05d29a0e","Type":"ContainerDied","Data":"c130f9082d669e254909d53a0287d9ad2b78c4b457e788a5df983dea6c2d813d"} Nov 28 13:22:05 crc 
kubenswrapper[4857]: I1128 13:22:05.056532 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" event={"ID":"aba2e99a-c0de-4ae5-b347-de1565fd9d68","Type":"ContainerStarted","Data":"a683cf012ac6614e5b0b9da4523354a6e417c246529cbc1812a88525172cf275"} Nov 28 13:22:05 crc kubenswrapper[4857]: I1128 13:22:05.065845 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b678s" event={"ID":"fd1f9d7f-303b-4372-8937-0a7b31e45355","Type":"ContainerStarted","Data":"ac9a4344bfcb7847e265522f2a687dd54a9a0951ddd36b235f61b1241fbd6a6e"} Nov 28 13:22:05 crc kubenswrapper[4857]: I1128 13:22:05.070892 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"f1ad4ded-68c6-4b81-a74b-017f2bb594ca","Type":"ContainerStarted","Data":"4714a971980485f89d5fc070851c850dd292f974c9f95b5d37471171d6b8fbd9"} Nov 28 13:22:05 crc kubenswrapper[4857]: I1128 13:22:05.074592 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-jspn8" event={"ID":"9ab9b94a-66a7-4d68-8046-d6d97595330d","Type":"ContainerStarted","Data":"5fc2d91f1d3ac08bf6ea04754ee707cf3c4360a36b08b009ebc53a9edf202851"} Nov 28 13:22:05 crc kubenswrapper[4857]: I1128 13:22:05.074625 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-jspn8" event={"ID":"9ab9b94a-66a7-4d68-8046-d6d97595330d","Type":"ContainerStarted","Data":"f9eb46a7b0f1cf1cef4ea5cf4b66f0d2f7272952a546201613ea8286385e72c6"} Nov 28 13:22:05 crc kubenswrapper[4857]: I1128 13:22:05.076496 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r2fq8" event={"ID":"28962db4-abc0-431e-832c-01246a09d048","Type":"ContainerStarted","Data":"a59abcc27d2ccfb35218ae24ff7f255734a53244847f37b47e4f71ef201c4893"} Nov 28 13:22:05 crc kubenswrapper[4857]: I1128 13:22:05.104828 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l2hg8" event={"ID":"f6949630-5993-404f-8177-fddca689d6b1","Type":"ContainerStarted","Data":"8f12a7afe35692eeacced148fb6346bf486a60332d869b84d3f5564602f63563"} Nov 28 13:22:05 crc kubenswrapper[4857]: I1128 13:22:05.145378 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=27.145350229 podStartE2EDuration="27.145350229s" podCreationTimestamp="2025-11-28 13:21:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:22:05.126438606 +0000 UTC m=+217.153813773" watchObservedRunningTime="2025-11-28 13:22:05.145350229 +0000 UTC m=+217.172725396" Nov 28 13:22:05 crc kubenswrapper[4857]: I1128 13:22:05.152811 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-jspn8" podStartSLOduration=197.152797343 podStartE2EDuration="3m17.152797343s" podCreationTimestamp="2025-11-28 13:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:22:05.150530668 +0000 UTC m=+217.177905835" watchObservedRunningTime="2025-11-28 13:22:05.152797343 +0000 UTC m=+217.180172510" Nov 28 13:22:06 crc kubenswrapper[4857]: I1128 13:22:06.130532 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-authentication/oauth-openshift-558db77b4-hzw48"] Nov 28 13:22:06 crc kubenswrapper[4857]: I1128 13:22:06.144315 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jml4b" event={"ID":"d34958af-3c7b-4821-8fa8-af2ec4591af5","Type":"ContainerStarted","Data":"4b2a6195c98724679524bd298e93b827ccb967a70a0c0ff283c8e4102787927e"} Nov 28 13:22:06 crc kubenswrapper[4857]: I1128 13:22:06.149027 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"2a2aed81-340c-4ac4-993e-409deefce9b8","Type":"ContainerStarted","Data":"050d2a56bb9f6fadaef1b1727096ced6ca0f3bc1334bf2b0bc463ce637e94633"} Nov 28 13:22:06 crc kubenswrapper[4857]: I1128 13:22:06.162654 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"f1ad4ded-68c6-4b81-a74b-017f2bb594ca","Type":"ContainerStarted","Data":"11bad9bef3250d98433bdccf544b6e5440a0b1680d81d96a3effbc3bfe1998b7"} Nov 28 13:22:06 crc kubenswrapper[4857]: I1128 13:22:06.295824 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=23.295730899 podStartE2EDuration="23.295730899s" podCreationTimestamp="2025-11-28 13:21:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:22:06.294858564 +0000 UTC m=+218.322233731" watchObservedRunningTime="2025-11-28 13:22:06.295730899 +0000 UTC m=+218.323106056" Nov 28 13:22:07 crc kubenswrapper[4857]: I1128 13:22:07.171504 4857 generic.go:334] "Generic (PLEG): container finished" podID="f6949630-5993-404f-8177-fddca689d6b1" containerID="8f12a7afe35692eeacced148fb6346bf486a60332d869b84d3f5564602f63563" exitCode=0 Nov 28 13:22:07 crc kubenswrapper[4857]: I1128 13:22:07.171582 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l2hg8" event={"ID":"f6949630-5993-404f-8177-fddca689d6b1","Type":"ContainerDied","Data":"8f12a7afe35692eeacced148fb6346bf486a60332d869b84d3f5564602f63563"} Nov 28 13:22:07 crc kubenswrapper[4857]: I1128 13:22:07.173486 4857 generic.go:334] "Generic (PLEG): container finished" podID="d34958af-3c7b-4821-8fa8-af2ec4591af5" containerID="4b2a6195c98724679524bd298e93b827ccb967a70a0c0ff283c8e4102787927e" exitCode=0 Nov 28 13:22:07 crc kubenswrapper[4857]: I1128 13:22:07.173575 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jml4b" event={"ID":"d34958af-3c7b-4821-8fa8-af2ec4591af5","Type":"ContainerDied","Data":"4b2a6195c98724679524bd298e93b827ccb967a70a0c0ff283c8e4102787927e"} Nov 28 13:22:07 crc kubenswrapper[4857]: I1128 13:22:07.176293 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wxvfq" event={"ID":"7f760076-0358-4b0e-9b50-0d3d05d29a0e","Type":"ContainerStarted","Data":"4549f3b8edb5c5e42a161320654b635cf675f0b4efdc1441970b0adf53041b9f"} Nov 28 13:22:07 crc kubenswrapper[4857]: I1128 13:22:07.178381 4857 generic.go:334] "Generic (PLEG): container finished" podID="fd1f9d7f-303b-4372-8937-0a7b31e45355" containerID="ac9a4344bfcb7847e265522f2a687dd54a9a0951ddd36b235f61b1241fbd6a6e" exitCode=0 Nov 28 13:22:07 crc kubenswrapper[4857]: I1128 13:22:07.178432 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b678s" 
event={"ID":"fd1f9d7f-303b-4372-8937-0a7b31e45355","Type":"ContainerDied","Data":"ac9a4344bfcb7847e265522f2a687dd54a9a0951ddd36b235f61b1241fbd6a6e"} Nov 28 13:22:07 crc kubenswrapper[4857]: I1128 13:22:07.181845 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v6x5g" event={"ID":"5ad31e12-d359-4a81-8e56-5431c271c7ce","Type":"ContainerStarted","Data":"c4ec779224a265bcedcdae343651f3fec17516a0ca22e5ac6a5ddb55d776ecde"} Nov 28 13:22:07 crc kubenswrapper[4857]: I1128 13:22:07.183603 4857 generic.go:334] "Generic (PLEG): container finished" podID="28962db4-abc0-431e-832c-01246a09d048" containerID="a59abcc27d2ccfb35218ae24ff7f255734a53244847f37b47e4f71ef201c4893" exitCode=0 Nov 28 13:22:07 crc kubenswrapper[4857]: I1128 13:22:07.183685 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r2fq8" event={"ID":"28962db4-abc0-431e-832c-01246a09d048","Type":"ContainerDied","Data":"a59abcc27d2ccfb35218ae24ff7f255734a53244847f37b47e4f71ef201c4893"} Nov 28 13:22:07 crc kubenswrapper[4857]: I1128 13:22:07.185764 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fsv5j" event={"ID":"fd3db365-db2a-4a0b-9485-bd38e8da6614","Type":"ContainerStarted","Data":"910e566d5eb148da2b48c1e9737b5084007dd5e7c0a27a8b95a646b7d5494877"} Nov 28 13:22:07 crc kubenswrapper[4857]: I1128 13:22:07.233015 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-v6x5g" podStartSLOduration=22.600095921 podStartE2EDuration="1m10.232997627s" podCreationTimestamp="2025-11-28 13:20:57 +0000 UTC" firstStartedPulling="2025-11-28 13:21:18.788053361 +0000 UTC m=+170.815428558" lastFinishedPulling="2025-11-28 13:22:06.420955097 +0000 UTC m=+218.448330264" observedRunningTime="2025-11-28 13:22:07.231229327 +0000 UTC m=+219.258604494" watchObservedRunningTime="2025-11-28 13:22:07.232997627 +0000 UTC m=+219.260372794" Nov 28 13:22:07 crc kubenswrapper[4857]: I1128 13:22:07.258129 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wxvfq" podStartSLOduration=21.711011178 podStartE2EDuration="1m9.258110949s" podCreationTimestamp="2025-11-28 13:20:58 +0000 UTC" firstStartedPulling="2025-11-28 13:21:18.788093692 +0000 UTC m=+170.815468859" lastFinishedPulling="2025-11-28 13:22:06.335193473 +0000 UTC m=+218.362568630" observedRunningTime="2025-11-28 13:22:07.255280828 +0000 UTC m=+219.282655995" watchObservedRunningTime="2025-11-28 13:22:07.258110949 +0000 UTC m=+219.285486116" Nov 28 13:22:07 crc kubenswrapper[4857]: I1128 13:22:07.374797 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fsv5j" podStartSLOduration=11.401211996 podStartE2EDuration="1m10.374779411s" podCreationTimestamp="2025-11-28 13:20:57 +0000 UTC" firstStartedPulling="2025-11-28 13:21:07.504124072 +0000 UTC m=+159.531499239" lastFinishedPulling="2025-11-28 13:22:06.477691487 +0000 UTC m=+218.505066654" observedRunningTime="2025-11-28 13:22:07.334199435 +0000 UTC m=+219.361574612" watchObservedRunningTime="2025-11-28 13:22:07.374779411 +0000 UTC m=+219.402154578" Nov 28 13:22:07 crc kubenswrapper[4857]: I1128 13:22:07.736704 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fsv5j" Nov 28 13:22:07 crc kubenswrapper[4857]: I1128 13:22:07.737165 4857 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fsv5j" Nov 28 13:22:08 crc kubenswrapper[4857]: I1128 13:22:08.222093 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r2fq8" event={"ID":"28962db4-abc0-431e-832c-01246a09d048","Type":"ContainerStarted","Data":"d55cce3979b934f81fb0eba894e38b7ef2f36e552951331fd7b3fcb168e0f8a9"} Nov 28 13:22:08 crc kubenswrapper[4857]: I1128 13:22:08.224417 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l2hg8" event={"ID":"f6949630-5993-404f-8177-fddca689d6b1","Type":"ContainerStarted","Data":"96f3c97d24ed26f0109ac0328d65acac3ba7c4d48c28ba0ff2c308bd3fd0a64e"} Nov 28 13:22:08 crc kubenswrapper[4857]: I1128 13:22:08.226210 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jml4b" event={"ID":"d34958af-3c7b-4821-8fa8-af2ec4591af5","Type":"ContainerStarted","Data":"80879ef75664bdbfb1e0299672ca41706942acc2affd837a2dd2a302b6ae3657"} Nov 28 13:22:08 crc kubenswrapper[4857]: I1128 13:22:08.228768 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b678s" event={"ID":"fd1f9d7f-303b-4372-8937-0a7b31e45355","Type":"ContainerStarted","Data":"61ddf71dddee65074db13459715fe261575b723a7f61a6eb6159f13c0576576e"} Nov 28 13:22:08 crc kubenswrapper[4857]: I1128 13:22:08.229837 4857 generic.go:334] "Generic (PLEG): container finished" podID="f1ad4ded-68c6-4b81-a74b-017f2bb594ca" containerID="11bad9bef3250d98433bdccf544b6e5440a0b1680d81d96a3effbc3bfe1998b7" exitCode=0 Nov 28 13:22:08 crc kubenswrapper[4857]: I1128 13:22:08.230188 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"f1ad4ded-68c6-4b81-a74b-017f2bb594ca","Type":"ContainerDied","Data":"11bad9bef3250d98433bdccf544b6e5440a0b1680d81d96a3effbc3bfe1998b7"} Nov 28 13:22:08 crc kubenswrapper[4857]: I1128 13:22:08.238009 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-v6x5g" Nov 28 13:22:08 crc kubenswrapper[4857]: I1128 13:22:08.238036 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-v6x5g" Nov 28 13:22:08 crc kubenswrapper[4857]: I1128 13:22:08.248117 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-r2fq8" podStartSLOduration=5.886136288 podStartE2EDuration="1m13.248101752s" podCreationTimestamp="2025-11-28 13:20:55 +0000 UTC" firstStartedPulling="2025-11-28 13:21:00.43199364 +0000 UTC m=+152.459368807" lastFinishedPulling="2025-11-28 13:22:07.793959104 +0000 UTC m=+219.821334271" observedRunningTime="2025-11-28 13:22:08.246866506 +0000 UTC m=+220.274241693" watchObservedRunningTime="2025-11-28 13:22:08.248101752 +0000 UTC m=+220.275476919" Nov 28 13:22:08 crc kubenswrapper[4857]: I1128 13:22:08.297125 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-b678s" podStartSLOduration=6.126628987 podStartE2EDuration="1m13.29710906s" podCreationTimestamp="2025-11-28 13:20:55 +0000 UTC" firstStartedPulling="2025-11-28 13:21:00.432109033 +0000 UTC m=+152.459484200" lastFinishedPulling="2025-11-28 13:22:07.602589106 +0000 UTC m=+219.629964273" observedRunningTime="2025-11-28 13:22:08.27937452 +0000 UTC m=+220.306749707" 
watchObservedRunningTime="2025-11-28 13:22:08.29710906 +0000 UTC m=+220.324484217" Nov 28 13:22:08 crc kubenswrapper[4857]: I1128 13:22:08.299579 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-l2hg8" podStartSLOduration=5.983541586 podStartE2EDuration="1m13.29956912s" podCreationTimestamp="2025-11-28 13:20:55 +0000 UTC" firstStartedPulling="2025-11-28 13:21:00.432031901 +0000 UTC m=+152.459407068" lastFinishedPulling="2025-11-28 13:22:07.748059435 +0000 UTC m=+219.775434602" observedRunningTime="2025-11-28 13:22:08.295827913 +0000 UTC m=+220.323203080" watchObservedRunningTime="2025-11-28 13:22:08.29956912 +0000 UTC m=+220.326944287" Nov 28 13:22:08 crc kubenswrapper[4857]: I1128 13:22:08.330935 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-jml4b" podStartSLOduration=4.035367294 podStartE2EDuration="1m13.330918751s" podCreationTimestamp="2025-11-28 13:20:55 +0000 UTC" firstStartedPulling="2025-11-28 13:20:58.370474201 +0000 UTC m=+150.397849368" lastFinishedPulling="2025-11-28 13:22:07.666025658 +0000 UTC m=+219.693400825" observedRunningTime="2025-11-28 13:22:08.326899176 +0000 UTC m=+220.354274343" watchObservedRunningTime="2025-11-28 13:22:08.330918751 +0000 UTC m=+220.358293918" Nov 28 13:22:08 crc kubenswrapper[4857]: I1128 13:22:08.824231 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-fsv5j" podUID="fd3db365-db2a-4a0b-9485-bd38e8da6614" containerName="registry-server" probeResult="failure" output=< Nov 28 13:22:08 crc kubenswrapper[4857]: timeout: failed to connect service ":50051" within 1s Nov 28 13:22:08 crc kubenswrapper[4857]: > Nov 28 13:22:08 crc kubenswrapper[4857]: I1128 13:22:08.868127 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wxvfq" Nov 28 13:22:08 crc kubenswrapper[4857]: I1128 13:22:08.868235 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wxvfq" Nov 28 13:22:09 crc kubenswrapper[4857]: I1128 13:22:09.235480 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwsxc" event={"ID":"4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7","Type":"ContainerStarted","Data":"4203fb8a05b40b7e1e361aa2273359f2f893b2810a8ee1b09cc8f9bac79322f3"} Nov 28 13:22:09 crc kubenswrapper[4857]: I1128 13:22:09.350356 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-v6x5g" podUID="5ad31e12-d359-4a81-8e56-5431c271c7ce" containerName="registry-server" probeResult="failure" output=< Nov 28 13:22:09 crc kubenswrapper[4857]: timeout: failed to connect service ":50051" within 1s Nov 28 13:22:09 crc kubenswrapper[4857]: > Nov 28 13:22:09 crc kubenswrapper[4857]: I1128 13:22:09.704110 4857 util.go:48] "No ready sandbox for pod can be found. 
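
The pod_startup_latency_tracker entries above tie podStartSLOduration to podStartE2EDuration: the SLO figure is the end-to-end startup time minus the image-pull window (lastFinishedPulling - firstStartedPulling). A quick Python check against the redhat-marketplace-v6x5g entry, using the monotonic m=+ offsets copied from the log; this is an illustrative recomputation, not kubelet code:

    # Values copied from the redhat-marketplace-v6x5g latency entry above.
    first_started_pulling = 170.815428558   # m=+ offset at firstStartedPulling
    last_finished_pulling = 218.448330264   # m=+ offset at lastFinishedPulling
    e2e = 70.232997627                      # podStartE2EDuration, in seconds

    pull_window = last_finished_pulling - first_started_pulling  # 47.632901706
    slo = e2e - pull_window
    print(round(slo, 9))  # 22.600095921 -- matches the podStartSLOduration logged above
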
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 13:22:09 crc kubenswrapper[4857]: I1128 13:22:09.758280 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f1ad4ded-68c6-4b81-a74b-017f2bb594ca-kube-api-access\") pod \"f1ad4ded-68c6-4b81-a74b-017f2bb594ca\" (UID: \"f1ad4ded-68c6-4b81-a74b-017f2bb594ca\") " Nov 28 13:22:09 crc kubenswrapper[4857]: I1128 13:22:09.758668 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f1ad4ded-68c6-4b81-a74b-017f2bb594ca-kubelet-dir\") pod \"f1ad4ded-68c6-4b81-a74b-017f2bb594ca\" (UID: \"f1ad4ded-68c6-4b81-a74b-017f2bb594ca\") " Nov 28 13:22:09 crc kubenswrapper[4857]: I1128 13:22:09.759006 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f1ad4ded-68c6-4b81-a74b-017f2bb594ca-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "f1ad4ded-68c6-4b81-a74b-017f2bb594ca" (UID: "f1ad4ded-68c6-4b81-a74b-017f2bb594ca"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:22:09 crc kubenswrapper[4857]: I1128 13:22:09.778940 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1ad4ded-68c6-4b81-a74b-017f2bb594ca-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "f1ad4ded-68c6-4b81-a74b-017f2bb594ca" (UID: "f1ad4ded-68c6-4b81-a74b-017f2bb594ca"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:22:09 crc kubenswrapper[4857]: I1128 13:22:09.859622 4857 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f1ad4ded-68c6-4b81-a74b-017f2bb594ca-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:09 crc kubenswrapper[4857]: I1128 13:22:09.859656 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f1ad4ded-68c6-4b81-a74b-017f2bb594ca-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:09 crc kubenswrapper[4857]: I1128 13:22:09.916519 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wxvfq" podUID="7f760076-0358-4b0e-9b50-0d3d05d29a0e" containerName="registry-server" probeResult="failure" output=< Nov 28 13:22:09 crc kubenswrapper[4857]: timeout: failed to connect service ":50051" within 1s Nov 28 13:22:09 crc kubenswrapper[4857]: > Nov 28 13:22:10 crc kubenswrapper[4857]: I1128 13:22:10.279347 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"f1ad4ded-68c6-4b81-a74b-017f2bb594ca","Type":"ContainerDied","Data":"4714a971980485f89d5fc070851c850dd292f974c9f95b5d37471171d6b8fbd9"} Nov 28 13:22:10 crc kubenswrapper[4857]: I1128 13:22:10.279661 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4714a971980485f89d5fc070851c850dd292f974c9f95b5d37471171d6b8fbd9" Nov 28 13:22:10 crc kubenswrapper[4857]: I1128 13:22:10.279365 4857 util.go:48] "No ready sandbox for pod can be found. 
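
The startup-probe failures above ("timeout: failed to connect service \":50051\" within 1s") carry the output format of grpc_health_probe, so the registry-server probes appear to be gRPC health checks against port 50051 with a 1-second deadline. A minimal sketch of an equivalent check, assuming the grpcio and grpcio-health-checking packages are available; the function name is illustrative:

    import grpc
    from grpc_health.v1 import health_pb2, health_pb2_grpc

    def registry_serving(addr: str = "localhost:50051", timeout_s: float = 1.0) -> bool:
        """Return True if the gRPC health service at addr reports SERVING."""
        channel = grpc.insecure_channel(addr)
        stub = health_pb2_grpc.HealthStub(channel)
        try:
            resp = stub.Check(health_pb2.HealthCheckRequest(service=""), timeout=timeout_s)
            return resp.status == health_pb2.HealthCheckResponse.SERVING
        except grpc.RpcError:
            # Connection refused or deadline exceeded -> probe failure, as logged above.
            return False
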
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 13:22:10 crc kubenswrapper[4857]: E1128 13:22:10.411219 4857 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-podf1ad4ded_68c6_4b81_a74b_017f2bb594ca.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-podf1ad4ded_68c6_4b81_a74b_017f2bb594ca.slice/crio-4714a971980485f89d5fc070851c850dd292f974c9f95b5d37471171d6b8fbd9\": RecentStats: unable to find data in memory cache]" Nov 28 13:22:13 crc kubenswrapper[4857]: I1128 13:22:13.297696 4857 generic.go:334] "Generic (PLEG): container finished" podID="4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7" containerID="4203fb8a05b40b7e1e361aa2273359f2f893b2810a8ee1b09cc8f9bac79322f3" exitCode=0 Nov 28 13:22:13 crc kubenswrapper[4857]: I1128 13:22:13.297817 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwsxc" event={"ID":"4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7","Type":"ContainerDied","Data":"4203fb8a05b40b7e1e361aa2273359f2f893b2810a8ee1b09cc8f9bac79322f3"} Nov 28 13:22:15 crc kubenswrapper[4857]: I1128 13:22:15.313584 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwsxc" event={"ID":"4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7","Type":"ContainerStarted","Data":"82c405f912546c1d35557f4089b89ac42b8ca9e4b950757925da6c0bbe37292d"} Nov 28 13:22:15 crc kubenswrapper[4857]: I1128 13:22:15.334566 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vwsxc" podStartSLOduration=21.668931109 podStartE2EDuration="1m17.334545466s" podCreationTimestamp="2025-11-28 13:20:58 +0000 UTC" firstStartedPulling="2025-11-28 13:21:18.787246728 +0000 UTC m=+170.814621905" lastFinishedPulling="2025-11-28 13:22:14.452861085 +0000 UTC m=+226.480236262" observedRunningTime="2025-11-28 13:22:15.33400103 +0000 UTC m=+227.361376257" watchObservedRunningTime="2025-11-28 13:22:15.334545466 +0000 UTC m=+227.361920673" Nov 28 13:22:15 crc kubenswrapper[4857]: I1128 13:22:15.397453 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-b678s" Nov 28 13:22:15 crc kubenswrapper[4857]: I1128 13:22:15.397522 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-b678s" Nov 28 13:22:15 crc kubenswrapper[4857]: I1128 13:22:15.516631 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-b678s" Nov 28 13:22:15 crc kubenswrapper[4857]: I1128 13:22:15.615584 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-r2fq8" Nov 28 13:22:15 crc kubenswrapper[4857]: I1128 13:22:15.616049 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-r2fq8" Nov 28 13:22:15 crc kubenswrapper[4857]: I1128 13:22:15.682827 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-r2fq8" Nov 28 13:22:15 crc kubenswrapper[4857]: I1128 13:22:15.870932 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-jml4b" Nov 28 13:22:15 crc kubenswrapper[4857]: I1128 13:22:15.871374 4857 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-marketplace/certified-operators-jml4b" Nov 28 13:22:15 crc kubenswrapper[4857]: I1128 13:22:15.905031 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-jml4b" Nov 28 13:22:16 crc kubenswrapper[4857]: I1128 13:22:16.307400 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-l2hg8" Nov 28 13:22:16 crc kubenswrapper[4857]: I1128 13:22:16.307780 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-l2hg8" Nov 28 13:22:16 crc kubenswrapper[4857]: I1128 13:22:16.416908 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-jml4b" Nov 28 13:22:16 crc kubenswrapper[4857]: I1128 13:22:16.417498 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-r2fq8" Nov 28 13:22:16 crc kubenswrapper[4857]: I1128 13:22:16.426355 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-b678s" Nov 28 13:22:16 crc kubenswrapper[4857]: I1128 13:22:16.434744 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-l2hg8" Nov 28 13:22:17 crc kubenswrapper[4857]: I1128 13:22:17.349014 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jml4b"] Nov 28 13:22:17 crc kubenswrapper[4857]: I1128 13:22:17.419280 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-l2hg8" Nov 28 13:22:17 crc kubenswrapper[4857]: I1128 13:22:17.796073 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fsv5j" Nov 28 13:22:17 crc kubenswrapper[4857]: I1128 13:22:17.847623 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fsv5j" Nov 28 13:22:17 crc kubenswrapper[4857]: I1128 13:22:17.945665 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l2hg8"] Nov 28 13:22:18 crc kubenswrapper[4857]: I1128 13:22:18.282669 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-v6x5g" Nov 28 13:22:18 crc kubenswrapper[4857]: I1128 13:22:18.349949 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-v6x5g" Nov 28 13:22:18 crc kubenswrapper[4857]: I1128 13:22:18.825307 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vwsxc" Nov 28 13:22:18 crc kubenswrapper[4857]: I1128 13:22:18.825366 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vwsxc" Nov 28 13:22:18 crc kubenswrapper[4857]: I1128 13:22:18.912995 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wxvfq" Nov 28 13:22:18 crc kubenswrapper[4857]: I1128 13:22:18.960782 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wxvfq" Nov 28 13:22:19 crc kubenswrapper[4857]: I1128 13:22:19.342698 4857 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-l2hg8" podUID="f6949630-5993-404f-8177-fddca689d6b1" containerName="registry-server" containerID="cri-o://96f3c97d24ed26f0109ac0328d65acac3ba7c4d48c28ba0ff2c308bd3fd0a64e" gracePeriod=2 Nov 28 13:22:19 crc kubenswrapper[4857]: I1128 13:22:19.343204 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-jml4b" podUID="d34958af-3c7b-4821-8fa8-af2ec4591af5" containerName="registry-server" containerID="cri-o://80879ef75664bdbfb1e0299672ca41706942acc2affd837a2dd2a302b6ae3657" gracePeriod=2 Nov 28 13:22:19 crc kubenswrapper[4857]: I1128 13:22:19.873454 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vwsxc" podUID="4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7" containerName="registry-server" probeResult="failure" output=< Nov 28 13:22:19 crc kubenswrapper[4857]: timeout: failed to connect service ":50051" within 1s Nov 28 13:22:19 crc kubenswrapper[4857]: > Nov 28 13:22:20 crc kubenswrapper[4857]: I1128 13:22:20.347690 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-v6x5g"] Nov 28 13:22:20 crc kubenswrapper[4857]: I1128 13:22:20.348258 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-v6x5g" podUID="5ad31e12-d359-4a81-8e56-5431c271c7ce" containerName="registry-server" containerID="cri-o://c4ec779224a265bcedcdae343651f3fec17516a0ca22e5ac6a5ddb55d776ecde" gracePeriod=2 Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.314654 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jml4b" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.319510 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l2hg8" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.328924 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2grhj\" (UniqueName: \"kubernetes.io/projected/d34958af-3c7b-4821-8fa8-af2ec4591af5-kube-api-access-2grhj\") pod \"d34958af-3c7b-4821-8fa8-af2ec4591af5\" (UID: \"d34958af-3c7b-4821-8fa8-af2ec4591af5\") " Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.329029 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d34958af-3c7b-4821-8fa8-af2ec4591af5-utilities\") pod \"d34958af-3c7b-4821-8fa8-af2ec4591af5\" (UID: \"d34958af-3c7b-4821-8fa8-af2ec4591af5\") " Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.329098 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d34958af-3c7b-4821-8fa8-af2ec4591af5-catalog-content\") pod \"d34958af-3c7b-4821-8fa8-af2ec4591af5\" (UID: \"d34958af-3c7b-4821-8fa8-af2ec4591af5\") " Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.330242 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d34958af-3c7b-4821-8fa8-af2ec4591af5-utilities" (OuterVolumeSpecName: "utilities") pod "d34958af-3c7b-4821-8fa8-af2ec4591af5" (UID: "d34958af-3c7b-4821-8fa8-af2ec4591af5"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.358437 4857 generic.go:334] "Generic (PLEG): container finished" podID="5ad31e12-d359-4a81-8e56-5431c271c7ce" containerID="c4ec779224a265bcedcdae343651f3fec17516a0ca22e5ac6a5ddb55d776ecde" exitCode=0 Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.358930 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v6x5g" event={"ID":"5ad31e12-d359-4a81-8e56-5431c271c7ce","Type":"ContainerDied","Data":"c4ec779224a265bcedcdae343651f3fec17516a0ca22e5ac6a5ddb55d776ecde"} Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.359692 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d34958af-3c7b-4821-8fa8-af2ec4591af5-kube-api-access-2grhj" (OuterVolumeSpecName: "kube-api-access-2grhj") pod "d34958af-3c7b-4821-8fa8-af2ec4591af5" (UID: "d34958af-3c7b-4821-8fa8-af2ec4591af5"). InnerVolumeSpecName "kube-api-access-2grhj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.368879 4857 generic.go:334] "Generic (PLEG): container finished" podID="f6949630-5993-404f-8177-fddca689d6b1" containerID="96f3c97d24ed26f0109ac0328d65acac3ba7c4d48c28ba0ff2c308bd3fd0a64e" exitCode=0 Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.368942 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l2hg8" event={"ID":"f6949630-5993-404f-8177-fddca689d6b1","Type":"ContainerDied","Data":"96f3c97d24ed26f0109ac0328d65acac3ba7c4d48c28ba0ff2c308bd3fd0a64e"} Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.368968 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l2hg8" event={"ID":"f6949630-5993-404f-8177-fddca689d6b1","Type":"ContainerDied","Data":"17783e178fab886a5994e153718a8d419f5cc4622bba7d13f2793397a7cfcde7"} Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.368986 4857 scope.go:117] "RemoveContainer" containerID="96f3c97d24ed26f0109ac0328d65acac3ba7c4d48c28ba0ff2c308bd3fd0a64e" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.369101 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l2hg8" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.372618 4857 generic.go:334] "Generic (PLEG): container finished" podID="d34958af-3c7b-4821-8fa8-af2ec4591af5" containerID="80879ef75664bdbfb1e0299672ca41706942acc2affd837a2dd2a302b6ae3657" exitCode=0 Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.372650 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jml4b" event={"ID":"d34958af-3c7b-4821-8fa8-af2ec4591af5","Type":"ContainerDied","Data":"80879ef75664bdbfb1e0299672ca41706942acc2affd837a2dd2a302b6ae3657"} Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.372666 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jml4b" event={"ID":"d34958af-3c7b-4821-8fa8-af2ec4591af5","Type":"ContainerDied","Data":"1eef2ba9f93016fa339b5c1f7c5615002b5970b283af0230977badb6f726a2e7"} Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.372712 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jml4b" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.382094 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d34958af-3c7b-4821-8fa8-af2ec4591af5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d34958af-3c7b-4821-8fa8-af2ec4591af5" (UID: "d34958af-3c7b-4821-8fa8-af2ec4591af5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.389810 4857 scope.go:117] "RemoveContainer" containerID="8f12a7afe35692eeacced148fb6346bf486a60332d869b84d3f5564602f63563" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.407648 4857 scope.go:117] "RemoveContainer" containerID="e4039bf0ffa4b0f8bf7682bfcfedfe526fda9bed9cb2a64706bcd78b12319931" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.423290 4857 scope.go:117] "RemoveContainer" containerID="96f3c97d24ed26f0109ac0328d65acac3ba7c4d48c28ba0ff2c308bd3fd0a64e" Nov 28 13:22:21 crc kubenswrapper[4857]: E1128 13:22:21.423672 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96f3c97d24ed26f0109ac0328d65acac3ba7c4d48c28ba0ff2c308bd3fd0a64e\": container with ID starting with 96f3c97d24ed26f0109ac0328d65acac3ba7c4d48c28ba0ff2c308bd3fd0a64e not found: ID does not exist" containerID="96f3c97d24ed26f0109ac0328d65acac3ba7c4d48c28ba0ff2c308bd3fd0a64e" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.423720 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96f3c97d24ed26f0109ac0328d65acac3ba7c4d48c28ba0ff2c308bd3fd0a64e"} err="failed to get container status \"96f3c97d24ed26f0109ac0328d65acac3ba7c4d48c28ba0ff2c308bd3fd0a64e\": rpc error: code = NotFound desc = could not find container \"96f3c97d24ed26f0109ac0328d65acac3ba7c4d48c28ba0ff2c308bd3fd0a64e\": container with ID starting with 96f3c97d24ed26f0109ac0328d65acac3ba7c4d48c28ba0ff2c308bd3fd0a64e not found: ID does not exist" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.423772 4857 scope.go:117] "RemoveContainer" containerID="8f12a7afe35692eeacced148fb6346bf486a60332d869b84d3f5564602f63563" Nov 28 13:22:21 crc kubenswrapper[4857]: E1128 13:22:21.424214 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f12a7afe35692eeacced148fb6346bf486a60332d869b84d3f5564602f63563\": container with ID starting with 8f12a7afe35692eeacced148fb6346bf486a60332d869b84d3f5564602f63563 not found: ID does not exist" containerID="8f12a7afe35692eeacced148fb6346bf486a60332d869b84d3f5564602f63563" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.424255 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f12a7afe35692eeacced148fb6346bf486a60332d869b84d3f5564602f63563"} err="failed to get container status \"8f12a7afe35692eeacced148fb6346bf486a60332d869b84d3f5564602f63563\": rpc error: code = NotFound desc = could not find container \"8f12a7afe35692eeacced148fb6346bf486a60332d869b84d3f5564602f63563\": container with ID starting with 8f12a7afe35692eeacced148fb6346bf486a60332d869b84d3f5564602f63563 not found: ID does not exist" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.424300 4857 scope.go:117] "RemoveContainer" containerID="e4039bf0ffa4b0f8bf7682bfcfedfe526fda9bed9cb2a64706bcd78b12319931" Nov 28 13:22:21 crc 
kubenswrapper[4857]: E1128 13:22:21.424628 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4039bf0ffa4b0f8bf7682bfcfedfe526fda9bed9cb2a64706bcd78b12319931\": container with ID starting with e4039bf0ffa4b0f8bf7682bfcfedfe526fda9bed9cb2a64706bcd78b12319931 not found: ID does not exist" containerID="e4039bf0ffa4b0f8bf7682bfcfedfe526fda9bed9cb2a64706bcd78b12319931" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.424648 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4039bf0ffa4b0f8bf7682bfcfedfe526fda9bed9cb2a64706bcd78b12319931"} err="failed to get container status \"e4039bf0ffa4b0f8bf7682bfcfedfe526fda9bed9cb2a64706bcd78b12319931\": rpc error: code = NotFound desc = could not find container \"e4039bf0ffa4b0f8bf7682bfcfedfe526fda9bed9cb2a64706bcd78b12319931\": container with ID starting with e4039bf0ffa4b0f8bf7682bfcfedfe526fda9bed9cb2a64706bcd78b12319931 not found: ID does not exist" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.424676 4857 scope.go:117] "RemoveContainer" containerID="80879ef75664bdbfb1e0299672ca41706942acc2affd837a2dd2a302b6ae3657" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.429902 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6949630-5993-404f-8177-fddca689d6b1-utilities\") pod \"f6949630-5993-404f-8177-fddca689d6b1\" (UID: \"f6949630-5993-404f-8177-fddca689d6b1\") " Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.429942 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6949630-5993-404f-8177-fddca689d6b1-catalog-content\") pod \"f6949630-5993-404f-8177-fddca689d6b1\" (UID: \"f6949630-5993-404f-8177-fddca689d6b1\") " Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.430069 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-klczj\" (UniqueName: \"kubernetes.io/projected/f6949630-5993-404f-8177-fddca689d6b1-kube-api-access-klczj\") pod \"f6949630-5993-404f-8177-fddca689d6b1\" (UID: \"f6949630-5993-404f-8177-fddca689d6b1\") " Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.430260 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2grhj\" (UniqueName: \"kubernetes.io/projected/d34958af-3c7b-4821-8fa8-af2ec4591af5-kube-api-access-2grhj\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.430277 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d34958af-3c7b-4821-8fa8-af2ec4591af5-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.430285 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d34958af-3c7b-4821-8fa8-af2ec4591af5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.430643 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6949630-5993-404f-8177-fddca689d6b1-utilities" (OuterVolumeSpecName: "utilities") pod "f6949630-5993-404f-8177-fddca689d6b1" (UID: "f6949630-5993-404f-8177-fddca689d6b1"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.433075 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6949630-5993-404f-8177-fddca689d6b1-kube-api-access-klczj" (OuterVolumeSpecName: "kube-api-access-klczj") pod "f6949630-5993-404f-8177-fddca689d6b1" (UID: "f6949630-5993-404f-8177-fddca689d6b1"). InnerVolumeSpecName "kube-api-access-klczj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.437271 4857 scope.go:117] "RemoveContainer" containerID="4b2a6195c98724679524bd298e93b827ccb967a70a0c0ff283c8e4102787927e" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.452494 4857 scope.go:117] "RemoveContainer" containerID="2247befdc773cb92ee44212383dd910c2c8c68767e9526b3232922a8ed757d1e" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.464572 4857 scope.go:117] "RemoveContainer" containerID="80879ef75664bdbfb1e0299672ca41706942acc2affd837a2dd2a302b6ae3657" Nov 28 13:22:21 crc kubenswrapper[4857]: E1128 13:22:21.464844 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"80879ef75664bdbfb1e0299672ca41706942acc2affd837a2dd2a302b6ae3657\": container with ID starting with 80879ef75664bdbfb1e0299672ca41706942acc2affd837a2dd2a302b6ae3657 not found: ID does not exist" containerID="80879ef75664bdbfb1e0299672ca41706942acc2affd837a2dd2a302b6ae3657" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.464883 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"80879ef75664bdbfb1e0299672ca41706942acc2affd837a2dd2a302b6ae3657"} err="failed to get container status \"80879ef75664bdbfb1e0299672ca41706942acc2affd837a2dd2a302b6ae3657\": rpc error: code = NotFound desc = could not find container \"80879ef75664bdbfb1e0299672ca41706942acc2affd837a2dd2a302b6ae3657\": container with ID starting with 80879ef75664bdbfb1e0299672ca41706942acc2affd837a2dd2a302b6ae3657 not found: ID does not exist" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.464907 4857 scope.go:117] "RemoveContainer" containerID="4b2a6195c98724679524bd298e93b827ccb967a70a0c0ff283c8e4102787927e" Nov 28 13:22:21 crc kubenswrapper[4857]: E1128 13:22:21.465102 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b2a6195c98724679524bd298e93b827ccb967a70a0c0ff283c8e4102787927e\": container with ID starting with 4b2a6195c98724679524bd298e93b827ccb967a70a0c0ff283c8e4102787927e not found: ID does not exist" containerID="4b2a6195c98724679524bd298e93b827ccb967a70a0c0ff283c8e4102787927e" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.465129 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b2a6195c98724679524bd298e93b827ccb967a70a0c0ff283c8e4102787927e"} err="failed to get container status \"4b2a6195c98724679524bd298e93b827ccb967a70a0c0ff283c8e4102787927e\": rpc error: code = NotFound desc = could not find container \"4b2a6195c98724679524bd298e93b827ccb967a70a0c0ff283c8e4102787927e\": container with ID starting with 4b2a6195c98724679524bd298e93b827ccb967a70a0c0ff283c8e4102787927e not found: ID does not exist" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.465144 4857 scope.go:117] "RemoveContainer" containerID="2247befdc773cb92ee44212383dd910c2c8c68767e9526b3232922a8ed757d1e" Nov 28 13:22:21 crc 
kubenswrapper[4857]: E1128 13:22:21.465448 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2247befdc773cb92ee44212383dd910c2c8c68767e9526b3232922a8ed757d1e\": container with ID starting with 2247befdc773cb92ee44212383dd910c2c8c68767e9526b3232922a8ed757d1e not found: ID does not exist" containerID="2247befdc773cb92ee44212383dd910c2c8c68767e9526b3232922a8ed757d1e" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.465479 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2247befdc773cb92ee44212383dd910c2c8c68767e9526b3232922a8ed757d1e"} err="failed to get container status \"2247befdc773cb92ee44212383dd910c2c8c68767e9526b3232922a8ed757d1e\": rpc error: code = NotFound desc = could not find container \"2247befdc773cb92ee44212383dd910c2c8c68767e9526b3232922a8ed757d1e\": container with ID starting with 2247befdc773cb92ee44212383dd910c2c8c68767e9526b3232922a8ed757d1e not found: ID does not exist" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.480480 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6949630-5993-404f-8177-fddca689d6b1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f6949630-5993-404f-8177-fddca689d6b1" (UID: "f6949630-5993-404f-8177-fddca689d6b1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.531578 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-klczj\" (UniqueName: \"kubernetes.io/projected/f6949630-5993-404f-8177-fddca689d6b1-kube-api-access-klczj\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.531630 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6949630-5993-404f-8177-fddca689d6b1-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.531641 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6949630-5993-404f-8177-fddca689d6b1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.701663 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l2hg8"] Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.705628 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-l2hg8"] Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.714736 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jml4b"] Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.717374 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-jml4b"] Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.804670 4857 util.go:48] "No ready sandbox for pod can be found. 
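
The "ContainerStatus from runtime service failed ... NotFound" / "DeleteContainer returned error" pairs above are benign: during container removal the kubelet treats a container that is already gone as successfully cleaned up and only logs the failed status lookup. A sketch of that idempotent-delete pattern, with hypothetical names:

    class NotFoundError(Exception):
        """Raised by the (hypothetical) runtime when a container ID is unknown."""

    def remove_container(runtime, container_id: str) -> None:
        # Deleting an already-removed container counts as success for cleanup,
        # mirroring how the log above records NotFound and simply moves on.
        try:
            runtime.remove(container_id)
        except NotFoundError:
            print(f"container {container_id} not found; treating as already removed")
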
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v6x5g" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.835452 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrtj5\" (UniqueName: \"kubernetes.io/projected/5ad31e12-d359-4a81-8e56-5431c271c7ce-kube-api-access-wrtj5\") pod \"5ad31e12-d359-4a81-8e56-5431c271c7ce\" (UID: \"5ad31e12-d359-4a81-8e56-5431c271c7ce\") " Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.835513 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ad31e12-d359-4a81-8e56-5431c271c7ce-utilities\") pod \"5ad31e12-d359-4a81-8e56-5431c271c7ce\" (UID: \"5ad31e12-d359-4a81-8e56-5431c271c7ce\") " Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.835572 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ad31e12-d359-4a81-8e56-5431c271c7ce-catalog-content\") pod \"5ad31e12-d359-4a81-8e56-5431c271c7ce\" (UID: \"5ad31e12-d359-4a81-8e56-5431c271c7ce\") " Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.836624 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ad31e12-d359-4a81-8e56-5431c271c7ce-utilities" (OuterVolumeSpecName: "utilities") pod "5ad31e12-d359-4a81-8e56-5431c271c7ce" (UID: "5ad31e12-d359-4a81-8e56-5431c271c7ce"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.839650 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ad31e12-d359-4a81-8e56-5431c271c7ce-kube-api-access-wrtj5" (OuterVolumeSpecName: "kube-api-access-wrtj5") pod "5ad31e12-d359-4a81-8e56-5431c271c7ce" (UID: "5ad31e12-d359-4a81-8e56-5431c271c7ce"). InnerVolumeSpecName "kube-api-access-wrtj5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.860092 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ad31e12-d359-4a81-8e56-5431c271c7ce-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5ad31e12-d359-4a81-8e56-5431c271c7ce" (UID: "5ad31e12-d359-4a81-8e56-5431c271c7ce"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.936888 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrtj5\" (UniqueName: \"kubernetes.io/projected/5ad31e12-d359-4a81-8e56-5431c271c7ce-kube-api-access-wrtj5\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.936934 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ad31e12-d359-4a81-8e56-5431c271c7ce-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:21 crc kubenswrapper[4857]: I1128 13:22:21.936947 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ad31e12-d359-4a81-8e56-5431c271c7ce-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:22 crc kubenswrapper[4857]: I1128 13:22:22.319514 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d34958af-3c7b-4821-8fa8-af2ec4591af5" path="/var/lib/kubelet/pods/d34958af-3c7b-4821-8fa8-af2ec4591af5/volumes" Nov 28 13:22:22 crc kubenswrapper[4857]: I1128 13:22:22.320554 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6949630-5993-404f-8177-fddca689d6b1" path="/var/lib/kubelet/pods/f6949630-5993-404f-8177-fddca689d6b1/volumes" Nov 28 13:22:22 crc kubenswrapper[4857]: I1128 13:22:22.388009 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v6x5g" event={"ID":"5ad31e12-d359-4a81-8e56-5431c271c7ce","Type":"ContainerDied","Data":"26871187fa4b729ef2c700ea86ac0fb57b6300235f06c5c4be7c1aa26df55d81"} Nov 28 13:22:22 crc kubenswrapper[4857]: I1128 13:22:22.388056 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v6x5g" Nov 28 13:22:22 crc kubenswrapper[4857]: I1128 13:22:22.388072 4857 scope.go:117] "RemoveContainer" containerID="c4ec779224a265bcedcdae343651f3fec17516a0ca22e5ac6a5ddb55d776ecde" Nov 28 13:22:22 crc kubenswrapper[4857]: I1128 13:22:22.413568 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-v6x5g"] Nov 28 13:22:22 crc kubenswrapper[4857]: I1128 13:22:22.415918 4857 scope.go:117] "RemoveContainer" containerID="af44d88a1ab662afc0e537f2e70fb2550f70303380ce0aa5e5ef88af7f8efc34" Nov 28 13:22:22 crc kubenswrapper[4857]: I1128 13:22:22.418411 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-v6x5g"] Nov 28 13:22:22 crc kubenswrapper[4857]: I1128 13:22:22.430087 4857 scope.go:117] "RemoveContainer" containerID="cdb522b401bb2dda1e539cb281379fcc122b5e007880fe113bf09a95a1cd1acc" Nov 28 13:22:24 crc kubenswrapper[4857]: I1128 13:22:24.316988 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ad31e12-d359-4a81-8e56-5431c271c7ce" path="/var/lib/kubelet/pods/5ad31e12-d359-4a81-8e56-5431c271c7ce/volumes" Nov 28 13:22:28 crc kubenswrapper[4857]: I1128 13:22:28.867103 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vwsxc" Nov 28 13:22:28 crc kubenswrapper[4857]: I1128 13:22:28.918104 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vwsxc" Nov 28 13:22:31 crc kubenswrapper[4857]: I1128 13:22:31.240070 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" podUID="0eafa688-6c78-44bc-93de-6e300a65a036" containerName="oauth-openshift" containerID="cri-o://52b70e6da85f4ab3ca9c4cc9ce66767add145ed223dd6e0e67e4fb68f65d5dd7" gracePeriod=15 Nov 28 13:22:31 crc kubenswrapper[4857]: I1128 13:22:31.342631 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vwsxc"] Nov 28 13:22:31 crc kubenswrapper[4857]: I1128 13:22:31.342886 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vwsxc" podUID="4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7" containerName="registry-server" containerID="cri-o://82c405f912546c1d35557f4089b89ac42b8ca9e4b950757925da6c0bbe37292d" gracePeriod=2 Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.420012 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vwsxc" Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.480452 4857 generic.go:334] "Generic (PLEG): container finished" podID="0eafa688-6c78-44bc-93de-6e300a65a036" containerID="52b70e6da85f4ab3ca9c4cc9ce66767add145ed223dd6e0e67e4fb68f65d5dd7" exitCode=0 Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.480511 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" event={"ID":"0eafa688-6c78-44bc-93de-6e300a65a036","Type":"ContainerDied","Data":"52b70e6da85f4ab3ca9c4cc9ce66767add145ed223dd6e0e67e4fb68f65d5dd7"} Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.484194 4857 generic.go:334] "Generic (PLEG): container finished" podID="4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7" containerID="82c405f912546c1d35557f4089b89ac42b8ca9e4b950757925da6c0bbe37292d" exitCode=0 Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.484246 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwsxc" event={"ID":"4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7","Type":"ContainerDied","Data":"82c405f912546c1d35557f4089b89ac42b8ca9e4b950757925da6c0bbe37292d"} Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.484312 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwsxc" event={"ID":"4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7","Type":"ContainerDied","Data":"1ff10c3647ec28a2712f32a1ccbabc00d3cb32a55e0b94ff66fb76342e4ff3ea"} Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.484326 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vwsxc" Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.484338 4857 scope.go:117] "RemoveContainer" containerID="82c405f912546c1d35557f4089b89ac42b8ca9e4b950757925da6c0bbe37292d" Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.506843 4857 scope.go:117] "RemoveContainer" containerID="4203fb8a05b40b7e1e361aa2273359f2f893b2810a8ee1b09cc8f9bac79322f3" Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.506912 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7-catalog-content\") pod \"4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7\" (UID: \"4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7\") " Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.506964 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7tq8w\" (UniqueName: \"kubernetes.io/projected/4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7-kube-api-access-7tq8w\") pod \"4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7\" (UID: \"4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7\") " Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.507016 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7-utilities\") pod \"4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7\" (UID: \"4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7\") " Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.508371 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7-utilities" (OuterVolumeSpecName: "utilities") pod "4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7" (UID: "4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.517171 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7-kube-api-access-7tq8w" (OuterVolumeSpecName: "kube-api-access-7tq8w") pod "4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7" (UID: "4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7"). InnerVolumeSpecName "kube-api-access-7tq8w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.535545 4857 scope.go:117] "RemoveContainer" containerID="9df2df3deee81e06ae3a9a1c744f1d3181a13bb5af59a2e1f7a3db02c8816fdb" Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.577324 4857 scope.go:117] "RemoveContainer" containerID="82c405f912546c1d35557f4089b89ac42b8ca9e4b950757925da6c0bbe37292d" Nov 28 13:22:34 crc kubenswrapper[4857]: E1128 13:22:34.577868 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82c405f912546c1d35557f4089b89ac42b8ca9e4b950757925da6c0bbe37292d\": container with ID starting with 82c405f912546c1d35557f4089b89ac42b8ca9e4b950757925da6c0bbe37292d not found: ID does not exist" containerID="82c405f912546c1d35557f4089b89ac42b8ca9e4b950757925da6c0bbe37292d" Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.577934 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82c405f912546c1d35557f4089b89ac42b8ca9e4b950757925da6c0bbe37292d"} err="failed to get container status \"82c405f912546c1d35557f4089b89ac42b8ca9e4b950757925da6c0bbe37292d\": rpc error: code = NotFound desc = could not find container \"82c405f912546c1d35557f4089b89ac42b8ca9e4b950757925da6c0bbe37292d\": container with ID starting with 82c405f912546c1d35557f4089b89ac42b8ca9e4b950757925da6c0bbe37292d not found: ID does not exist" Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.577972 4857 scope.go:117] "RemoveContainer" containerID="4203fb8a05b40b7e1e361aa2273359f2f893b2810a8ee1b09cc8f9bac79322f3" Nov 28 13:22:34 crc kubenswrapper[4857]: E1128 13:22:34.578484 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4203fb8a05b40b7e1e361aa2273359f2f893b2810a8ee1b09cc8f9bac79322f3\": container with ID starting with 4203fb8a05b40b7e1e361aa2273359f2f893b2810a8ee1b09cc8f9bac79322f3 not found: ID does not exist" containerID="4203fb8a05b40b7e1e361aa2273359f2f893b2810a8ee1b09cc8f9bac79322f3" Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.578797 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4203fb8a05b40b7e1e361aa2273359f2f893b2810a8ee1b09cc8f9bac79322f3"} err="failed to get container status \"4203fb8a05b40b7e1e361aa2273359f2f893b2810a8ee1b09cc8f9bac79322f3\": rpc error: code = NotFound desc = could not find container \"4203fb8a05b40b7e1e361aa2273359f2f893b2810a8ee1b09cc8f9bac79322f3\": container with ID starting with 4203fb8a05b40b7e1e361aa2273359f2f893b2810a8ee1b09cc8f9bac79322f3 not found: ID does not exist" Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.578841 4857 scope.go:117] "RemoveContainer" containerID="9df2df3deee81e06ae3a9a1c744f1d3181a13bb5af59a2e1f7a3db02c8816fdb" Nov 28 13:22:34 crc kubenswrapper[4857]: E1128 13:22:34.579561 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"9df2df3deee81e06ae3a9a1c744f1d3181a13bb5af59a2e1f7a3db02c8816fdb\": container with ID starting with 9df2df3deee81e06ae3a9a1c744f1d3181a13bb5af59a2e1f7a3db02c8816fdb not found: ID does not exist" containerID="9df2df3deee81e06ae3a9a1c744f1d3181a13bb5af59a2e1f7a3db02c8816fdb" Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.579656 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9df2df3deee81e06ae3a9a1c744f1d3181a13bb5af59a2e1f7a3db02c8816fdb"} err="failed to get container status \"9df2df3deee81e06ae3a9a1c744f1d3181a13bb5af59a2e1f7a3db02c8816fdb\": rpc error: code = NotFound desc = could not find container \"9df2df3deee81e06ae3a9a1c744f1d3181a13bb5af59a2e1f7a3db02c8816fdb\": container with ID starting with 9df2df3deee81e06ae3a9a1c744f1d3181a13bb5af59a2e1f7a3db02c8816fdb not found: ID does not exist" Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.608635 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7tq8w\" (UniqueName: \"kubernetes.io/projected/4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7-kube-api-access-7tq8w\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.608737 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.661374 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7" (UID: "4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.711074 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.833499 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vwsxc"] Nov 28 13:22:34 crc kubenswrapper[4857]: I1128 13:22:34.841439 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-vwsxc"] Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.444600 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.491744 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" event={"ID":"0eafa688-6c78-44bc-93de-6e300a65a036","Type":"ContainerDied","Data":"61fb7be051bb07a822e00fd7efb499de80a29fffdb2bedf0c616cbd5b383d6fc"} Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.491801 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-hzw48" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.491815 4857 scope.go:117] "RemoveContainer" containerID="52b70e6da85f4ab3ca9c4cc9ce66767add145ed223dd6e0e67e4fb68f65d5dd7" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.525937 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-ocp-branding-template\") pod \"0eafa688-6c78-44bc-93de-6e300a65a036\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.525993 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-serving-cert\") pod \"0eafa688-6c78-44bc-93de-6e300a65a036\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.526010 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-template-provider-selection\") pod \"0eafa688-6c78-44bc-93de-6e300a65a036\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.526406 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-audit-policies\") pod \"0eafa688-6c78-44bc-93de-6e300a65a036\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.526591 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-idp-0-file-data\") pod \"0eafa688-6c78-44bc-93de-6e300a65a036\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.526786 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-template-login\") pod \"0eafa688-6c78-44bc-93de-6e300a65a036\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.526952 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-trusted-ca-bundle\") pod \"0eafa688-6c78-44bc-93de-6e300a65a036\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.527165 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-router-certs\") pod \"0eafa688-6c78-44bc-93de-6e300a65a036\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.527686 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2zmdv\" 
(UniqueName: \"kubernetes.io/projected/0eafa688-6c78-44bc-93de-6e300a65a036-kube-api-access-2zmdv\") pod \"0eafa688-6c78-44bc-93de-6e300a65a036\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.527977 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-cliconfig\") pod \"0eafa688-6c78-44bc-93de-6e300a65a036\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.528108 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-template-error\") pod \"0eafa688-6c78-44bc-93de-6e300a65a036\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.528216 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-session\") pod \"0eafa688-6c78-44bc-93de-6e300a65a036\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.528440 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-service-ca\") pod \"0eafa688-6c78-44bc-93de-6e300a65a036\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.528542 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0eafa688-6c78-44bc-93de-6e300a65a036-audit-dir\") pod \"0eafa688-6c78-44bc-93de-6e300a65a036\" (UID: \"0eafa688-6c78-44bc-93de-6e300a65a036\") " Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.527721 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "0eafa688-6c78-44bc-93de-6e300a65a036" (UID: "0eafa688-6c78-44bc-93de-6e300a65a036"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.527894 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "0eafa688-6c78-44bc-93de-6e300a65a036" (UID: "0eafa688-6c78-44bc-93de-6e300a65a036"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.528577 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "0eafa688-6c78-44bc-93de-6e300a65a036" (UID: "0eafa688-6c78-44bc-93de-6e300a65a036"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.529055 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0eafa688-6c78-44bc-93de-6e300a65a036-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "0eafa688-6c78-44bc-93de-6e300a65a036" (UID: "0eafa688-6c78-44bc-93de-6e300a65a036"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.529293 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "0eafa688-6c78-44bc-93de-6e300a65a036" (UID: "0eafa688-6c78-44bc-93de-6e300a65a036"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.535208 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "0eafa688-6c78-44bc-93de-6e300a65a036" (UID: "0eafa688-6c78-44bc-93de-6e300a65a036"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.535303 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "0eafa688-6c78-44bc-93de-6e300a65a036" (UID: "0eafa688-6c78-44bc-93de-6e300a65a036"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.535574 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "0eafa688-6c78-44bc-93de-6e300a65a036" (UID: "0eafa688-6c78-44bc-93de-6e300a65a036"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.535617 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "0eafa688-6c78-44bc-93de-6e300a65a036" (UID: "0eafa688-6c78-44bc-93de-6e300a65a036"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.535975 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "0eafa688-6c78-44bc-93de-6e300a65a036" (UID: "0eafa688-6c78-44bc-93de-6e300a65a036"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.536106 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "0eafa688-6c78-44bc-93de-6e300a65a036" (UID: "0eafa688-6c78-44bc-93de-6e300a65a036"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.536434 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "0eafa688-6c78-44bc-93de-6e300a65a036" (UID: "0eafa688-6c78-44bc-93de-6e300a65a036"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.536483 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0eafa688-6c78-44bc-93de-6e300a65a036-kube-api-access-2zmdv" (OuterVolumeSpecName: "kube-api-access-2zmdv") pod "0eafa688-6c78-44bc-93de-6e300a65a036" (UID: "0eafa688-6c78-44bc-93de-6e300a65a036"). InnerVolumeSpecName "kube-api-access-2zmdv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.537012 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "0eafa688-6c78-44bc-93de-6e300a65a036" (UID: "0eafa688-6c78-44bc-93de-6e300a65a036"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.630580 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.630622 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2zmdv\" (UniqueName: \"kubernetes.io/projected/0eafa688-6c78-44bc-93de-6e300a65a036-kube-api-access-2zmdv\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.630636 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.630648 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.630661 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.630672 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.630686 4857 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0eafa688-6c78-44bc-93de-6e300a65a036-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.630701 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.630714 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.630726 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.630739 4857 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.630775 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-idp-0-file-data\") on 
node \"crc\" DevicePath \"\"" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.630795 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.630811 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0eafa688-6c78-44bc-93de-6e300a65a036-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.825618 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-hzw48"] Nov 28 13:22:35 crc kubenswrapper[4857]: I1128 13:22:35.832343 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-hzw48"] Nov 28 13:22:36 crc kubenswrapper[4857]: I1128 13:22:36.322134 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0eafa688-6c78-44bc-93de-6e300a65a036" path="/var/lib/kubelet/pods/0eafa688-6c78-44bc-93de-6e300a65a036/volumes" Nov 28 13:22:36 crc kubenswrapper[4857]: I1128 13:22:36.323543 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7" path="/var/lib/kubelet/pods/4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7/volumes" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.303128 4857 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 13:22:43 crc kubenswrapper[4857]: E1128 13:22:43.303930 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0eafa688-6c78-44bc-93de-6e300a65a036" containerName="oauth-openshift" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.303946 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0eafa688-6c78-44bc-93de-6e300a65a036" containerName="oauth-openshift" Nov 28 13:22:43 crc kubenswrapper[4857]: E1128 13:22:43.303956 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ad31e12-d359-4a81-8e56-5431c271c7ce" containerName="extract-utilities" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.303963 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ad31e12-d359-4a81-8e56-5431c271c7ce" containerName="extract-utilities" Nov 28 13:22:43 crc kubenswrapper[4857]: E1128 13:22:43.303974 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d34958af-3c7b-4821-8fa8-af2ec4591af5" containerName="extract-content" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.303983 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d34958af-3c7b-4821-8fa8-af2ec4591af5" containerName="extract-content" Nov 28 13:22:43 crc kubenswrapper[4857]: E1128 13:22:43.303993 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7" containerName="extract-content" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.304001 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7" containerName="extract-content" Nov 28 13:22:43 crc kubenswrapper[4857]: E1128 13:22:43.304009 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d34958af-3c7b-4821-8fa8-af2ec4591af5" containerName="registry-server" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.304019 4857 
state_mem.go:107] "Deleted CPUSet assignment" podUID="d34958af-3c7b-4821-8fa8-af2ec4591af5" containerName="registry-server" Nov 28 13:22:43 crc kubenswrapper[4857]: E1128 13:22:43.304032 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6949630-5993-404f-8177-fddca689d6b1" containerName="registry-server" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.304039 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6949630-5993-404f-8177-fddca689d6b1" containerName="registry-server" Nov 28 13:22:43 crc kubenswrapper[4857]: E1128 13:22:43.304049 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d34958af-3c7b-4821-8fa8-af2ec4591af5" containerName="extract-utilities" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.304056 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d34958af-3c7b-4821-8fa8-af2ec4591af5" containerName="extract-utilities" Nov 28 13:22:43 crc kubenswrapper[4857]: E1128 13:22:43.304064 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1ad4ded-68c6-4b81-a74b-017f2bb594ca" containerName="pruner" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.304084 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1ad4ded-68c6-4b81-a74b-017f2bb594ca" containerName="pruner" Nov 28 13:22:43 crc kubenswrapper[4857]: E1128 13:22:43.304094 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7" containerName="extract-utilities" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.304101 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7" containerName="extract-utilities" Nov 28 13:22:43 crc kubenswrapper[4857]: E1128 13:22:43.304113 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ad31e12-d359-4a81-8e56-5431c271c7ce" containerName="extract-content" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.304122 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ad31e12-d359-4a81-8e56-5431c271c7ce" containerName="extract-content" Nov 28 13:22:43 crc kubenswrapper[4857]: E1128 13:22:43.304130 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6949630-5993-404f-8177-fddca689d6b1" containerName="extract-content" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.304138 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6949630-5993-404f-8177-fddca689d6b1" containerName="extract-content" Nov 28 13:22:43 crc kubenswrapper[4857]: E1128 13:22:43.304147 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7" containerName="registry-server" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.304153 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7" containerName="registry-server" Nov 28 13:22:43 crc kubenswrapper[4857]: E1128 13:22:43.304164 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6949630-5993-404f-8177-fddca689d6b1" containerName="extract-utilities" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.304171 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6949630-5993-404f-8177-fddca689d6b1" containerName="extract-utilities" Nov 28 13:22:43 crc kubenswrapper[4857]: E1128 13:22:43.304184 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ad31e12-d359-4a81-8e56-5431c271c7ce" containerName="registry-server" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 
13:22:43.304191 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ad31e12-d359-4a81-8e56-5431c271c7ce" containerName="registry-server" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.304298 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d34958af-3c7b-4821-8fa8-af2ec4591af5" containerName="registry-server" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.304309 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a249f55-f58e-4c61-a3f2-15dbe5bf2cc7" containerName="registry-server" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.304326 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ad31e12-d359-4a81-8e56-5431c271c7ce" containerName="registry-server" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.304341 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6949630-5993-404f-8177-fddca689d6b1" containerName="registry-server" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.304348 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1ad4ded-68c6-4b81-a74b-017f2bb594ca" containerName="pruner" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.304357 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0eafa688-6c78-44bc-93de-6e300a65a036" containerName="oauth-openshift" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.304771 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.335941 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.336043 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.336313 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.336482 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.337018 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: 
\"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.352196 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.353952 4857 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.354431 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee" gracePeriod=15 Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.354502 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4" gracePeriod=15 Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.354553 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7" gracePeriod=15 Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.354482 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b" gracePeriod=15 Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.354603 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49" gracePeriod=15 Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.356900 4857 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 13:22:43 crc kubenswrapper[4857]: E1128 13:22:43.366915 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.366959 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 28 13:22:43 crc kubenswrapper[4857]: E1128 13:22:43.366983 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.366992 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 28 13:22:43 crc kubenswrapper[4857]: E1128 13:22:43.367002 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" 
containerName="kube-apiserver-check-endpoints" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.367011 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 13:22:43 crc kubenswrapper[4857]: E1128 13:22:43.367021 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.367032 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 28 13:22:43 crc kubenswrapper[4857]: E1128 13:22:43.367066 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.367077 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 13:22:43 crc kubenswrapper[4857]: E1128 13:22:43.367094 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.367104 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 13:22:43 crc kubenswrapper[4857]: E1128 13:22:43.367124 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.367133 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.367287 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.367300 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.367314 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.367328 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.367337 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.367348 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.438494 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 13:22:43 crc 
kubenswrapper[4857]: I1128 13:22:43.438544 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.438600 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.438651 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.438679 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.438699 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.438775 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.438809 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.438831 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.438848 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:22:43 crc 
kubenswrapper[4857]: I1128 13:22:43.438869 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.438908 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.438968 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.540199 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.540547 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.540588 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.540322 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.540677 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.540729 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.543456 4857 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.544916 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.545717 4857 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b" exitCode=0 Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.545764 4857 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49" exitCode=0 Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.545775 4857 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4" exitCode=0 Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.545786 4857 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7" exitCode=2 Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.545819 4857 scope.go:117] "RemoveContainer" containerID="8d037ae3253651e2b20711bf6682ca8f7f0e1f3f96fcf47cbff4130cdcb0868c" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.547848 4857 generic.go:334] "Generic (PLEG): container finished" podID="2a2aed81-340c-4ac4-993e-409deefce9b8" containerID="050d2a56bb9f6fadaef1b1727096ced6ca0f3bc1334bf2b0bc463ce637e94633" exitCode=0 Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.547893 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"2a2aed81-340c-4ac4-993e-409deefce9b8","Type":"ContainerDied","Data":"050d2a56bb9f6fadaef1b1727096ced6ca0f3bc1334bf2b0bc463ce637e94633"} Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.548692 4857 status_manager.go:851] "Failed to get status for pod" podUID="2a2aed81-340c-4ac4-993e-409deefce9b8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.549025 4857 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.549229 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:43 crc kubenswrapper[4857]: I1128 13:22:43.641197 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 13:22:43 crc kubenswrapper[4857]: E1128 13:22:43.666848 4857 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.227:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187c2e649bb5d6d0 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 13:22:43.666384592 +0000 UTC m=+255.693759759,LastTimestamp:2025-11-28 13:22:43.666384592 +0000 UTC m=+255.693759759,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 28 13:22:44 crc kubenswrapper[4857]: I1128 13:22:44.559474 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 13:22:44 crc kubenswrapper[4857]: I1128 13:22:44.561987 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"dbe33186017c978b49351c697dff50f51b6f5341963e46e5d7c68bf3ed7de188"} Nov 28 13:22:44 crc kubenswrapper[4857]: I1128 13:22:44.562060 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"2c39e9a5076bb388b08e0d1e439d6c53a187ceceec2e8b51d3c5c6cbe1a3470e"} Nov 28 13:22:44 crc kubenswrapper[4857]: I1128 13:22:44.562891 4857 status_manager.go:851] "Failed to get status for pod" podUID="2a2aed81-340c-4ac4-993e-409deefce9b8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:44 crc kubenswrapper[4857]: I1128 13:22:44.563367 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:44 crc kubenswrapper[4857]: I1128 13:22:44.862079 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 13:22:44 crc kubenswrapper[4857]: I1128 13:22:44.863317 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:44 crc kubenswrapper[4857]: I1128 13:22:44.863876 4857 status_manager.go:851] "Failed to get status for pod" podUID="2a2aed81-340c-4ac4-993e-409deefce9b8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:44 crc kubenswrapper[4857]: I1128 13:22:44.962346 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/2a2aed81-340c-4ac4-993e-409deefce9b8-var-lock\") pod \"2a2aed81-340c-4ac4-993e-409deefce9b8\" (UID: \"2a2aed81-340c-4ac4-993e-409deefce9b8\") " Nov 28 13:22:44 crc kubenswrapper[4857]: I1128 13:22:44.962427 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2a2aed81-340c-4ac4-993e-409deefce9b8-var-lock" (OuterVolumeSpecName: "var-lock") pod "2a2aed81-340c-4ac4-993e-409deefce9b8" (UID: "2a2aed81-340c-4ac4-993e-409deefce9b8"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:22:44 crc kubenswrapper[4857]: I1128 13:22:44.962477 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2a2aed81-340c-4ac4-993e-409deefce9b8-kube-api-access\") pod \"2a2aed81-340c-4ac4-993e-409deefce9b8\" (UID: \"2a2aed81-340c-4ac4-993e-409deefce9b8\") " Nov 28 13:22:44 crc kubenswrapper[4857]: I1128 13:22:44.962603 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2a2aed81-340c-4ac4-993e-409deefce9b8-kubelet-dir\") pod \"2a2aed81-340c-4ac4-993e-409deefce9b8\" (UID: \"2a2aed81-340c-4ac4-993e-409deefce9b8\") " Nov 28 13:22:44 crc kubenswrapper[4857]: I1128 13:22:44.962694 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2a2aed81-340c-4ac4-993e-409deefce9b8-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "2a2aed81-340c-4ac4-993e-409deefce9b8" (UID: "2a2aed81-340c-4ac4-993e-409deefce9b8"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:22:44 crc kubenswrapper[4857]: I1128 13:22:44.963177 4857 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2a2aed81-340c-4ac4-993e-409deefce9b8-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:44 crc kubenswrapper[4857]: I1128 13:22:44.963231 4857 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/2a2aed81-340c-4ac4-993e-409deefce9b8-var-lock\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:44 crc kubenswrapper[4857]: I1128 13:22:44.971102 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a2aed81-340c-4ac4-993e-409deefce9b8-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "2a2aed81-340c-4ac4-993e-409deefce9b8" (UID: "2a2aed81-340c-4ac4-993e-409deefce9b8"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:22:45 crc kubenswrapper[4857]: I1128 13:22:45.064977 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2a2aed81-340c-4ac4-993e-409deefce9b8-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:45 crc kubenswrapper[4857]: I1128 13:22:45.578847 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 13:22:45 crc kubenswrapper[4857]: I1128 13:22:45.578930 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"2a2aed81-340c-4ac4-993e-409deefce9b8","Type":"ContainerDied","Data":"eb0d0cf8d029231ae0adfda8313dfe6c0db036480acbe62ef0f895dc1018a549"} Nov 28 13:22:45 crc kubenswrapper[4857]: I1128 13:22:45.579363 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eb0d0cf8d029231ae0adfda8313dfe6c0db036480acbe62ef0f895dc1018a549" Nov 28 13:22:45 crc kubenswrapper[4857]: I1128 13:22:45.605462 4857 status_manager.go:851] "Failed to get status for pod" podUID="2a2aed81-340c-4ac4-993e-409deefce9b8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:45 crc kubenswrapper[4857]: I1128 13:22:45.605625 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:45 crc kubenswrapper[4857]: I1128 13:22:45.738577 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 13:22:45 crc kubenswrapper[4857]: I1128 13:22:45.739526 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:22:45 crc kubenswrapper[4857]: I1128 13:22:45.740226 4857 status_manager.go:851] "Failed to get status for pod" podUID="2a2aed81-340c-4ac4-993e-409deefce9b8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:45 crc kubenswrapper[4857]: I1128 13:22:45.740729 4857 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:45 crc kubenswrapper[4857]: I1128 13:22:45.741088 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:45 crc kubenswrapper[4857]: I1128 13:22:45.778898 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 28 13:22:45 crc kubenswrapper[4857]: I1128 13:22:45.779047 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 28 13:22:45 crc kubenswrapper[4857]: I1128 13:22:45.779035 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:22:45 crc kubenswrapper[4857]: I1128 13:22:45.779071 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 28 13:22:45 crc kubenswrapper[4857]: I1128 13:22:45.779107 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:22:45 crc kubenswrapper[4857]: I1128 13:22:45.779218 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:22:45 crc kubenswrapper[4857]: I1128 13:22:45.779457 4857 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:45 crc kubenswrapper[4857]: I1128 13:22:45.779476 4857 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:45 crc kubenswrapper[4857]: I1128 13:22:45.779485 4857 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.319790 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.587317 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.588825 4857 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee" exitCode=0 Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.588884 4857 scope.go:117] "RemoveContainer" containerID="6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.588892 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.589332 4857 status_manager.go:851] "Failed to get status for pod" podUID="2a2aed81-340c-4ac4-993e-409deefce9b8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.589524 4857 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.589692 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.591311 4857 status_manager.go:851] "Failed to get status for pod" podUID="2a2aed81-340c-4ac4-993e-409deefce9b8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.591583 4857 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.591908 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.604603 4857 scope.go:117] "RemoveContainer" containerID="12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.620620 4857 scope.go:117] "RemoveContainer" containerID="8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.635030 4857 scope.go:117] "RemoveContainer" containerID="489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.653583 4857 scope.go:117] "RemoveContainer" containerID="e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.685824 4857 scope.go:117] "RemoveContainer" containerID="aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.710122 4857 scope.go:117] "RemoveContainer" containerID="6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b" Nov 28 13:22:46 crc 
kubenswrapper[4857]: E1128 13:22:46.710640 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\": container with ID starting with 6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b not found: ID does not exist" containerID="6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.710670 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b"} err="failed to get container status \"6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\": rpc error: code = NotFound desc = could not find container \"6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b\": container with ID starting with 6d032cb68ffac44f7cfcb9ca50c49f6f3332d212186355e237d53a9deae0370b not found: ID does not exist" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.710693 4857 scope.go:117] "RemoveContainer" containerID="12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49" Nov 28 13:22:46 crc kubenswrapper[4857]: E1128 13:22:46.711116 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\": container with ID starting with 12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49 not found: ID does not exist" containerID="12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.711135 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49"} err="failed to get container status \"12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\": rpc error: code = NotFound desc = could not find container \"12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49\": container with ID starting with 12c7ba94d3fe24168c61754492549c1c4fccb5cf9e85c0e5b554bc65a140cd49 not found: ID does not exist" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.711148 4857 scope.go:117] "RemoveContainer" containerID="8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4" Nov 28 13:22:46 crc kubenswrapper[4857]: E1128 13:22:46.711545 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\": container with ID starting with 8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4 not found: ID does not exist" containerID="8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.711567 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4"} err="failed to get container status \"8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\": rpc error: code = NotFound desc = could not find container \"8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4\": container with ID starting with 8dbb888204bc92f7a494752a4ce48707d3195ca70843eea4df5c74da9c34f4f4 not found: ID does not exist" Nov 28 13:22:46 crc kubenswrapper[4857]: 
I1128 13:22:46.711582 4857 scope.go:117] "RemoveContainer" containerID="489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7" Nov 28 13:22:46 crc kubenswrapper[4857]: E1128 13:22:46.711886 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\": container with ID starting with 489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7 not found: ID does not exist" containerID="489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.711914 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7"} err="failed to get container status \"489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\": rpc error: code = NotFound desc = could not find container \"489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7\": container with ID starting with 489ead834d1667060bd1842acb68a009c26d76d6d01baf7f8872fcbfcc1312d7 not found: ID does not exist" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.711933 4857 scope.go:117] "RemoveContainer" containerID="e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee" Nov 28 13:22:46 crc kubenswrapper[4857]: E1128 13:22:46.712221 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\": container with ID starting with e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee not found: ID does not exist" containerID="e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.712239 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee"} err="failed to get container status \"e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\": rpc error: code = NotFound desc = could not find container \"e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee\": container with ID starting with e2a2acd93aedb8edda37a653e277dd2bee8df0d5e7e0a535778730e907fd7aee not found: ID does not exist" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.712250 4857 scope.go:117] "RemoveContainer" containerID="aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f" Nov 28 13:22:46 crc kubenswrapper[4857]: E1128 13:22:46.712630 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\": container with ID starting with aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f not found: ID does not exist" containerID="aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f" Nov 28 13:22:46 crc kubenswrapper[4857]: I1128 13:22:46.712733 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f"} err="failed to get container status \"aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\": rpc error: code = NotFound desc = could not find container \"aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f\": container 
with ID starting with aef6ccccc83a053f80e3a5ebadeedf7c269afd73db955124376464e75ffa195f not found: ID does not exist" Nov 28 13:22:48 crc kubenswrapper[4857]: I1128 13:22:48.313843 4857 status_manager.go:851] "Failed to get status for pod" podUID="2a2aed81-340c-4ac4-993e-409deefce9b8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:48 crc kubenswrapper[4857]: I1128 13:22:48.314414 4857 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:48 crc kubenswrapper[4857]: I1128 13:22:48.314913 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:49 crc kubenswrapper[4857]: E1128 13:22:49.682901 4857 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:49 crc kubenswrapper[4857]: E1128 13:22:49.684100 4857 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:49 crc kubenswrapper[4857]: E1128 13:22:49.684708 4857 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:49 crc kubenswrapper[4857]: E1128 13:22:49.685292 4857 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:49 crc kubenswrapper[4857]: E1128 13:22:49.686039 4857 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:49 crc kubenswrapper[4857]: I1128 13:22:49.686107 4857 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Nov 28 13:22:49 crc kubenswrapper[4857]: E1128 13:22:49.686616 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.227:6443: connect: connection refused" interval="200ms" Nov 28 13:22:49 crc kubenswrapper[4857]: E1128 13:22:49.887550 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.227:6443: connect: connection refused" interval="400ms" Nov 28 13:22:50 crc kubenswrapper[4857]: E1128 13:22:50.289226 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.227:6443: connect: connection refused" interval="800ms" Nov 28 13:22:50 crc kubenswrapper[4857]: E1128 13:22:50.340522 4857 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 38.102.83.227:6443: connect: connection refused" pod="openshift-image-registry/image-registry-697d97f7c8-t448t" volumeName="registry-storage" Nov 28 13:22:51 crc kubenswrapper[4857]: E1128 13:22:51.090803 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.227:6443: connect: connection refused" interval="1.6s" Nov 28 13:22:52 crc kubenswrapper[4857]: E1128 13:22:52.309114 4857 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.227:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187c2e649bb5d6d0 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 13:22:43.666384592 +0000 UTC m=+255.693759759,LastTimestamp:2025-11-28 13:22:43.666384592 +0000 UTC m=+255.693759759,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 28 13:22:52 crc kubenswrapper[4857]: E1128 13:22:52.691663 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.227:6443: connect: connection refused" interval="3.2s" Nov 28 13:22:54 crc kubenswrapper[4857]: I1128 13:22:54.309538 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:22:54 crc kubenswrapper[4857]: I1128 13:22:54.310986 4857 status_manager.go:851] "Failed to get status for pod" podUID="2a2aed81-340c-4ac4-993e-409deefce9b8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:54 crc kubenswrapper[4857]: I1128 13:22:54.313273 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:54 crc kubenswrapper[4857]: I1128 13:22:54.326427 4857 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f137641-9550-43d3-99c9-fb4d61e9eeb9" Nov 28 13:22:54 crc kubenswrapper[4857]: I1128 13:22:54.326458 4857 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f137641-9550-43d3-99c9-fb4d61e9eeb9" Nov 28 13:22:54 crc kubenswrapper[4857]: E1128 13:22:54.326887 4857 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:22:54 crc kubenswrapper[4857]: I1128 13:22:54.327409 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:22:54 crc kubenswrapper[4857]: I1128 13:22:54.632375 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"485413dd3c3cb6057178eccbcc08f003952a132095f560548ed2cfe2cb5feddb"} Nov 28 13:22:54 crc kubenswrapper[4857]: I1128 13:22:54.632418 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"041c39861fecc35530580ccc0c06a90a4e478cdee1ffdbaee79e7f61c1837278"} Nov 28 13:22:54 crc kubenswrapper[4857]: I1128 13:22:54.632674 4857 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f137641-9550-43d3-99c9-fb4d61e9eeb9" Nov 28 13:22:54 crc kubenswrapper[4857]: I1128 13:22:54.632693 4857 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f137641-9550-43d3-99c9-fb4d61e9eeb9" Nov 28 13:22:54 crc kubenswrapper[4857]: E1128 13:22:54.633085 4857 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:22:54 crc kubenswrapper[4857]: I1128 13:22:54.633127 4857 status_manager.go:851] "Failed to get status for pod" podUID="2a2aed81-340c-4ac4-993e-409deefce9b8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 
38.102.83.227:6443: connect: connection refused" Nov 28 13:22:54 crc kubenswrapper[4857]: I1128 13:22:54.633540 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:55 crc kubenswrapper[4857]: I1128 13:22:55.638809 4857 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="485413dd3c3cb6057178eccbcc08f003952a132095f560548ed2cfe2cb5feddb" exitCode=0 Nov 28 13:22:55 crc kubenswrapper[4857]: I1128 13:22:55.638912 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"485413dd3c3cb6057178eccbcc08f003952a132095f560548ed2cfe2cb5feddb"} Nov 28 13:22:55 crc kubenswrapper[4857]: I1128 13:22:55.639403 4857 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f137641-9550-43d3-99c9-fb4d61e9eeb9" Nov 28 13:22:55 crc kubenswrapper[4857]: I1128 13:22:55.639420 4857 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f137641-9550-43d3-99c9-fb4d61e9eeb9" Nov 28 13:22:55 crc kubenswrapper[4857]: E1128 13:22:55.639965 4857 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:22:55 crc kubenswrapper[4857]: I1128 13:22:55.639964 4857 status_manager.go:851] "Failed to get status for pod" podUID="2a2aed81-340c-4ac4-993e-409deefce9b8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:55 crc kubenswrapper[4857]: I1128 13:22:55.640419 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.227:6443: connect: connection refused" Nov 28 13:22:55 crc kubenswrapper[4857]: I1128 13:22:55.852942 4857 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Liveness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 28 13:22:55 crc kubenswrapper[4857]: I1128 13:22:55.853010 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 28 13:22:55 crc kubenswrapper[4857]: E1128 13:22:55.893180 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.227:6443: connect: connection refused" interval="6.4s" Nov 28 13:22:56 crc kubenswrapper[4857]: I1128 13:22:56.661308 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"cd8a5e73a5ca1c0ddd422ac30b010d29ea54aadb5b7247a1c9a22fe40f093197"} Nov 28 13:22:56 crc kubenswrapper[4857]: I1128 13:22:56.661389 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"5ce60acbcc90eb70937c95cde2d69b83d2f4b68c8d9876ea34e5420c4fb4b860"} Nov 28 13:22:56 crc kubenswrapper[4857]: I1128 13:22:56.661408 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"8f82eefd26bf6a13ef797a639d6e44c49c480168ed64c8d70faac7887571bce9"} Nov 28 13:22:56 crc kubenswrapper[4857]: I1128 13:22:56.661425 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"c3366e4219a4519f161dc63a62aad327e02b3a884e7ebe33ecf930bb7ed7d341"} Nov 28 13:22:56 crc kubenswrapper[4857]: I1128 13:22:56.669980 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 28 13:22:56 crc kubenswrapper[4857]: I1128 13:22:56.670467 4857 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3" exitCode=1 Nov 28 13:22:56 crc kubenswrapper[4857]: I1128 13:22:56.670526 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3"} Nov 28 13:22:56 crc kubenswrapper[4857]: I1128 13:22:56.671287 4857 scope.go:117] "RemoveContainer" containerID="3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3" Nov 28 13:22:57 crc kubenswrapper[4857]: I1128 13:22:57.677536 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"eb3faf24bc678c7c325e1a3f1e22498be79fae13b63de8115714fe7a565c92bc"} Nov 28 13:22:57 crc kubenswrapper[4857]: I1128 13:22:57.677883 4857 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f137641-9550-43d3-99c9-fb4d61e9eeb9" Nov 28 13:22:57 crc kubenswrapper[4857]: I1128 13:22:57.677902 4857 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f137641-9550-43d3-99c9-fb4d61e9eeb9" Nov 28 13:22:57 crc kubenswrapper[4857]: I1128 13:22:57.678106 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:22:57 crc kubenswrapper[4857]: I1128 13:22:57.681071 4857 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 28 13:22:57 crc kubenswrapper[4857]: I1128 13:22:57.681127 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"d799156dc236e23b108a76a2778dedbdd76cd63223c3cbda1a36615de1c454fe"} Nov 28 13:22:59 crc kubenswrapper[4857]: I1128 13:22:59.329145 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:22:59 crc kubenswrapper[4857]: I1128 13:22:59.329504 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:22:59 crc kubenswrapper[4857]: I1128 13:22:59.337970 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:23:02 crc kubenswrapper[4857]: I1128 13:23:02.688346 4857 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:23:02 crc kubenswrapper[4857]: I1128 13:23:02.712214 4857 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f137641-9550-43d3-99c9-fb4d61e9eeb9" Nov 28 13:23:02 crc kubenswrapper[4857]: I1128 13:23:02.712245 4857 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f137641-9550-43d3-99c9-fb4d61e9eeb9" Nov 28 13:23:02 crc kubenswrapper[4857]: I1128 13:23:02.716168 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:23:02 crc kubenswrapper[4857]: I1128 13:23:02.719028 4857 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="a3af17df-1ab6-447c-8fdf-075676e2ffb1" Nov 28 13:23:03 crc kubenswrapper[4857]: I1128 13:23:03.717193 4857 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f137641-9550-43d3-99c9-fb4d61e9eeb9" Nov 28 13:23:03 crc kubenswrapper[4857]: I1128 13:23:03.717225 4857 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f137641-9550-43d3-99c9-fb4d61e9eeb9" Nov 28 13:23:03 crc kubenswrapper[4857]: I1128 13:23:03.800596 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:23:03 crc kubenswrapper[4857]: I1128 13:23:03.800906 4857 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 28 13:23:03 crc kubenswrapper[4857]: I1128 13:23:03.800958 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 28 13:23:04 crc kubenswrapper[4857]: I1128 
13:23:04.952723 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:23:08 crc kubenswrapper[4857]: I1128 13:23:08.325307 4857 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="a3af17df-1ab6-447c-8fdf-075676e2ffb1" Nov 28 13:23:09 crc kubenswrapper[4857]: I1128 13:23:09.888648 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 28 13:23:11 crc kubenswrapper[4857]: I1128 13:23:11.968784 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 13:23:12 crc kubenswrapper[4857]: I1128 13:23:12.310636 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 28 13:23:13 crc kubenswrapper[4857]: I1128 13:23:13.011239 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 13:23:13 crc kubenswrapper[4857]: I1128 13:23:13.378211 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 28 13:23:13 crc kubenswrapper[4857]: I1128 13:23:13.800949 4857 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 28 13:23:13 crc kubenswrapper[4857]: I1128 13:23:13.801030 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 28 13:23:14 crc kubenswrapper[4857]: I1128 13:23:14.588260 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 28 13:23:14 crc kubenswrapper[4857]: I1128 13:23:14.784835 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 28 13:23:14 crc kubenswrapper[4857]: I1128 13:23:14.872674 4857 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 28 13:23:15 crc kubenswrapper[4857]: I1128 13:23:15.183732 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 28 13:23:15 crc kubenswrapper[4857]: I1128 13:23:15.251499 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 28 13:23:15 crc kubenswrapper[4857]: I1128 13:23:15.648819 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 28 13:23:15 crc kubenswrapper[4857]: I1128 13:23:15.659479 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 28 13:23:15 crc kubenswrapper[4857]: I1128 13:23:15.784864 4857 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 28 13:23:15 crc kubenswrapper[4857]: I1128 13:23:15.924143 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 28 13:23:16 crc kubenswrapper[4857]: I1128 13:23:16.028306 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 28 13:23:16 crc kubenswrapper[4857]: I1128 13:23:16.040736 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 28 13:23:16 crc kubenswrapper[4857]: I1128 13:23:16.272319 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 28 13:23:16 crc kubenswrapper[4857]: I1128 13:23:16.313420 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 28 13:23:16 crc kubenswrapper[4857]: I1128 13:23:16.444249 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 28 13:23:16 crc kubenswrapper[4857]: I1128 13:23:16.483593 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 28 13:23:16 crc kubenswrapper[4857]: I1128 13:23:16.633107 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 13:23:16 crc kubenswrapper[4857]: I1128 13:23:16.660533 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 28 13:23:16 crc kubenswrapper[4857]: I1128 13:23:16.728916 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 28 13:23:16 crc kubenswrapper[4857]: I1128 13:23:16.794840 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 28 13:23:16 crc kubenswrapper[4857]: I1128 13:23:16.947797 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 13:23:16 crc kubenswrapper[4857]: I1128 13:23:16.999423 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 28 13:23:17 crc kubenswrapper[4857]: I1128 13:23:17.426547 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 28 13:23:17 crc kubenswrapper[4857]: I1128 13:23:17.434187 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 13:23:17 crc kubenswrapper[4857]: I1128 13:23:17.440361 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 28 13:23:17 crc kubenswrapper[4857]: I1128 13:23:17.538232 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 28 13:23:17 crc kubenswrapper[4857]: I1128 13:23:17.678199 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 28 13:23:17 crc kubenswrapper[4857]: I1128 13:23:17.728602 4857 reflector.go:368] Caches populated 
for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 13:23:17 crc kubenswrapper[4857]: I1128 13:23:17.831889 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 28 13:23:17 crc kubenswrapper[4857]: I1128 13:23:17.841971 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 28 13:23:17 crc kubenswrapper[4857]: I1128 13:23:17.969374 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 28 13:23:18 crc kubenswrapper[4857]: I1128 13:23:18.105548 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 28 13:23:18 crc kubenswrapper[4857]: I1128 13:23:18.119525 4857 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 28 13:23:18 crc kubenswrapper[4857]: I1128 13:23:18.145692 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 28 13:23:18 crc kubenswrapper[4857]: I1128 13:23:18.439224 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 28 13:23:18 crc kubenswrapper[4857]: I1128 13:23:18.453524 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 28 13:23:18 crc kubenswrapper[4857]: I1128 13:23:18.704797 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 13:23:18 crc kubenswrapper[4857]: I1128 13:23:18.764925 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 28 13:23:18 crc kubenswrapper[4857]: I1128 13:23:18.841046 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 28 13:23:18 crc kubenswrapper[4857]: I1128 13:23:18.863068 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 28 13:23:18 crc kubenswrapper[4857]: I1128 13:23:18.957783 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 28 13:23:18 crc kubenswrapper[4857]: I1128 13:23:18.970706 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 28 13:23:19 crc kubenswrapper[4857]: I1128 13:23:19.085263 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 28 13:23:19 crc kubenswrapper[4857]: I1128 13:23:19.091440 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 28 13:23:19 crc kubenswrapper[4857]: I1128 13:23:19.282236 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 28 13:23:19 crc kubenswrapper[4857]: I1128 13:23:19.308377 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 28 13:23:19 crc kubenswrapper[4857]: I1128 13:23:19.308433 4857 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 28 13:23:19 crc kubenswrapper[4857]: I1128 13:23:19.450425 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 13:23:19 crc kubenswrapper[4857]: I1128 13:23:19.517876 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 28 13:23:19 crc kubenswrapper[4857]: I1128 13:23:19.524784 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 28 13:23:19 crc kubenswrapper[4857]: I1128 13:23:19.525064 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 28 13:23:19 crc kubenswrapper[4857]: I1128 13:23:19.667918 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 28 13:23:19 crc kubenswrapper[4857]: I1128 13:23:19.682524 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 28 13:23:19 crc kubenswrapper[4857]: I1128 13:23:19.714451 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 28 13:23:19 crc kubenswrapper[4857]: I1128 13:23:19.723360 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 28 13:23:19 crc kubenswrapper[4857]: I1128 13:23:19.891070 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 28 13:23:19 crc kubenswrapper[4857]: I1128 13:23:19.937416 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 28 13:23:19 crc kubenswrapper[4857]: I1128 13:23:19.971327 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.109587 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.187830 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.213440 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.218067 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.219953 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.241849 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.308505 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.321654 4857 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.393743 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.400327 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.432337 4857 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.434657 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=37.434628309 podStartE2EDuration="37.434628309s" podCreationTimestamp="2025-11-28 13:22:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:23:02.317768274 +0000 UTC m=+274.345143481" watchObservedRunningTime="2025-11-28 13:23:20.434628309 +0000 UTC m=+292.462003506" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.441365 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.441456 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj","openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 13:23:20 crc kubenswrapper[4857]: E1128 13:23:20.441784 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a2aed81-340c-4ac4-993e-409deefce9b8" containerName="installer" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.441804 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a2aed81-340c-4ac4-993e-409deefce9b8" containerName="installer" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.442020 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a2aed81-340c-4ac4-993e-409deefce9b8" containerName="installer" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.442627 4857 util.go:30] "No sandbox for pod can be found. 
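
The dense run of reflector.go:368 entries above records client-go informer caches completing their initial LIST for each individual ConfigMap and Secret that a scheduled pod references; the kubelet scopes these watches per object (hence the object-"namespace"/"name" form) rather than watching whole namespaces. Below is a minimal sketch of the same cache-sync mechanism using the public client-go informer API, not the kubelet's exact per-object wiring; the kubeconfig path and the namespace filter are illustrative assumptions.

    package main

    import (
    	"fmt"
    	"time"

    	"k8s.io/client-go/informers"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/cache"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	// Illustrative kubeconfig path; the kubelet constructs its client itself.
    	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/kubelet/kubeconfig")
    	if err != nil {
    		panic(err)
    	}
    	client := kubernetes.NewForConfigOrDie(cfg)

    	// Informers for one namespace; the kubelet filters further, down to
    	// the single objects a pod actually mounts.
    	factory := informers.NewSharedInformerFactoryWithOptions(
    		client, 10*time.Minute, informers.WithNamespace("openshift-authentication"))
    	cmInformer := factory.Core().V1().ConfigMaps().Informer()

    	stop := make(chan struct{})
    	defer close(stop)
    	factory.Start(stop)

    	// The "Caches populated for *v1.ConfigMap ..." lines correspond to
    	// this point: the initial LIST is done and the WATCH is established.
    	if !cache.WaitForCacheSync(stop, cmInformer.HasSynced) {
    		panic("configmap cache never synced")
    	}
    	fmt.Println("configmap cache synced")
    }
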
Need to start a new one" pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.444081 4857 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f137641-9550-43d3-99c9-fb4d61e9eeb9" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.444128 4857 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f137641-9550-43d3-99c9-fb4d61e9eeb9" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.446093 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.449338 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.449617 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.450050 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.450180 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.450403 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.451197 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.451223 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.451406 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.451650 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.451888 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.452095 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.457845 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.461981 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.466537 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.472429 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 28 13:23:20 crc 
kubenswrapper[4857]: I1128 13:23:20.476581 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.489447 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.493616 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=18.493601477 podStartE2EDuration="18.493601477s" podCreationTimestamp="2025-11-28 13:23:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:23:20.493272468 +0000 UTC m=+292.520647645" watchObservedRunningTime="2025-11-28 13:23:20.493601477 +0000 UTC m=+292.520976654" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.520408 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.521841 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.572664 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.635657 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.635778 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-system-session\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.635846 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.635905 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.635956 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.636044 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3c8b459d-f9f5-4df6-8abb-5bebcf338849-audit-policies\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.636408 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3c8b459d-f9f5-4df6-8abb-5bebcf338849-audit-dir\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.636606 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.637169 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-user-template-login\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.637354 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-system-router-certs\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.637549 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-user-template-error\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.637625 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jkjk4\" (UniqueName: \"kubernetes.io/projected/3c8b459d-f9f5-4df6-8abb-5bebcf338849-kube-api-access-jkjk4\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.637735 4857 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-system-service-ca\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.637841 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.720401 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.738981 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-system-session\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.739095 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.739151 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.739204 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.739270 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3c8b459d-f9f5-4df6-8abb-5bebcf338849-audit-policies\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.739343 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3c8b459d-f9f5-4df6-8abb-5bebcf338849-audit-dir\") pod 
\"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.739397 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.739454 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-user-template-login\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.739526 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-system-router-certs\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.739574 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-user-template-error\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.739628 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jkjk4\" (UniqueName: \"kubernetes.io/projected/3c8b459d-f9f5-4df6-8abb-5bebcf338849-kube-api-access-jkjk4\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.739712 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-system-service-ca\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.739822 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.739882 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-system-serving-cert\") pod 
\"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.740104 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3c8b459d-f9f5-4df6-8abb-5bebcf338849-audit-policies\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.740137 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3c8b459d-f9f5-4df6-8abb-5bebcf338849-audit-dir\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.741268 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-system-service-ca\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.741391 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.742563 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.745950 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.747630 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-system-session\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.751342 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-system-router-certs\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.757953 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: 
\"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.758094 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-user-template-login\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.760258 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.760488 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-user-template-error\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.761376 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.764806 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/3c8b459d-f9f5-4df6-8abb-5bebcf338849-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.767815 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jkjk4\" (UniqueName: \"kubernetes.io/projected/3c8b459d-f9f5-4df6-8abb-5bebcf338849-kube-api-access-jkjk4\") pod \"oauth-openshift-6b89b7dbcb-7v4fj\" (UID: \"3c8b459d-f9f5-4df6-8abb-5bebcf338849\") " pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.777180 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.983959 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 28 13:23:20 crc kubenswrapper[4857]: I1128 13:23:20.988220 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 28 13:23:21 crc kubenswrapper[4857]: I1128 13:23:21.018675 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 28 13:23:21 crc kubenswrapper[4857]: I1128 13:23:21.041220 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 28 13:23:21 crc kubenswrapper[4857]: I1128 13:23:21.143104 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 28 13:23:21 crc kubenswrapper[4857]: I1128 13:23:21.176104 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 28 13:23:21 crc kubenswrapper[4857]: I1128 13:23:21.201694 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 28 13:23:21 crc kubenswrapper[4857]: I1128 13:23:21.239398 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 28 13:23:21 crc kubenswrapper[4857]: I1128 13:23:21.257029 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 28 13:23:21 crc kubenswrapper[4857]: I1128 13:23:21.293106 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 28 13:23:21 crc kubenswrapper[4857]: I1128 13:23:21.373250 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 28 13:23:21 crc kubenswrapper[4857]: I1128 13:23:21.383393 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 28 13:23:21 crc kubenswrapper[4857]: I1128 13:23:21.397806 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 28 13:23:21 crc kubenswrapper[4857]: I1128 13:23:21.754426 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 28 13:23:21 crc kubenswrapper[4857]: I1128 13:23:21.825525 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 28 13:23:21 crc kubenswrapper[4857]: I1128 13:23:21.854681 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 28 13:23:21 crc kubenswrapper[4857]: I1128 13:23:21.871113 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 28 13:23:21 crc kubenswrapper[4857]: I1128 13:23:21.932050 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 13:23:21 crc kubenswrapper[4857]: 
I1128 13:23:21.941199 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 28 13:23:22 crc kubenswrapper[4857]: I1128 13:23:22.057413 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 28 13:23:22 crc kubenswrapper[4857]: I1128 13:23:22.068289 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 28 13:23:22 crc kubenswrapper[4857]: I1128 13:23:22.074449 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 28 13:23:22 crc kubenswrapper[4857]: I1128 13:23:22.108175 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 28 13:23:22 crc kubenswrapper[4857]: I1128 13:23:22.137675 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 28 13:23:22 crc kubenswrapper[4857]: I1128 13:23:22.250885 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 28 13:23:22 crc kubenswrapper[4857]: I1128 13:23:22.267359 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 28 13:23:22 crc kubenswrapper[4857]: I1128 13:23:22.332903 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 28 13:23:22 crc kubenswrapper[4857]: I1128 13:23:22.358703 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 28 13:23:22 crc kubenswrapper[4857]: I1128 13:23:22.360510 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 28 13:23:22 crc kubenswrapper[4857]: I1128 13:23:22.367650 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 28 13:23:22 crc kubenswrapper[4857]: I1128 13:23:22.403245 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 28 13:23:22 crc kubenswrapper[4857]: I1128 13:23:22.409689 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 28 13:23:22 crc kubenswrapper[4857]: I1128 13:23:22.415273 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 28 13:23:22 crc kubenswrapper[4857]: I1128 13:23:22.449434 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 28 13:23:22 crc kubenswrapper[4857]: I1128 13:23:22.489168 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 28 13:23:22 crc kubenswrapper[4857]: I1128 13:23:22.578552 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 13:23:22 crc kubenswrapper[4857]: I1128 13:23:22.624101 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 28 13:23:22 crc kubenswrapper[4857]: I1128 13:23:22.799276 4857 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-etcd-operator"/"etcd-operator-config" Nov 28 13:23:22 crc kubenswrapper[4857]: I1128 13:23:22.898270 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.047615 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.066212 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.140608 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.155666 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.278547 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.300407 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.300575 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.547200 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.551304 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.572504 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.595476 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.661525 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.699227 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.705550 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.705941 4857 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.753020 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.762869 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.766640 4857 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.769414 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.800915 4857 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.801806 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.801857 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.802447 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-controller-manager" containerStatusID={"Type":"cri-o","ID":"d799156dc236e23b108a76a2778dedbdd76cd63223c3cbda1a36615de1c454fe"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container kube-controller-manager failed startup probe, will be restarted" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.802562 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" containerID="cri-o://d799156dc236e23b108a76a2778dedbdd76cd63223c3cbda1a36615de1c454fe" gracePeriod=30 Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.836079 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.892414 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 28 13:23:23 crc kubenswrapper[4857]: I1128 13:23:23.925523 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.035106 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.060575 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.138263 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.139342 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.149744 4857 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-service-ca"/"signing-cabundle" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.214669 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.250416 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.261307 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.268958 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.269811 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.323126 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.344334 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.374456 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.377875 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.407296 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.443132 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.545370 4857 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.640087 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.699039 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.743126 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.808678 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.828423 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.868797 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.909026 4857 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.917703 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.963150 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.987830 4857 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 13:23:24 crc kubenswrapper[4857]: I1128 13:23:24.988042 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://dbe33186017c978b49351c697dff50f51b6f5341963e46e5d7c68bf3ed7de188" gracePeriod=5 Nov 28 13:23:25 crc kubenswrapper[4857]: I1128 13:23:25.018831 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 28 13:23:25 crc kubenswrapper[4857]: I1128 13:23:25.020101 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 28 13:23:25 crc kubenswrapper[4857]: I1128 13:23:25.190289 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 28 13:23:25 crc kubenswrapper[4857]: I1128 13:23:25.221473 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 28 13:23:25 crc kubenswrapper[4857]: I1128 13:23:25.242374 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 28 13:23:25 crc kubenswrapper[4857]: I1128 13:23:25.250303 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 28 13:23:25 crc kubenswrapper[4857]: I1128 13:23:25.291772 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 28 13:23:25 crc kubenswrapper[4857]: I1128 13:23:25.315820 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 28 13:23:25 crc kubenswrapper[4857]: I1128 13:23:25.358452 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 28 13:23:25 crc kubenswrapper[4857]: I1128 13:23:25.456646 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 28 13:23:25 crc kubenswrapper[4857]: I1128 13:23:25.477303 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 28 13:23:25 crc kubenswrapper[4857]: I1128 13:23:25.533156 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 28 13:23:25 crc kubenswrapper[4857]: I1128 13:23:25.549532 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 28 13:23:25 crc kubenswrapper[4857]: I1128 13:23:25.551289 4857 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 28 13:23:25 crc kubenswrapper[4857]: I1128 13:23:25.671202 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 13:23:25 crc kubenswrapper[4857]: I1128 13:23:25.795399 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 28 13:23:25 crc kubenswrapper[4857]: I1128 13:23:25.850305 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 28 13:23:25 crc kubenswrapper[4857]: I1128 13:23:25.863223 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 28 13:23:25 crc kubenswrapper[4857]: I1128 13:23:25.930302 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 28 13:23:25 crc kubenswrapper[4857]: I1128 13:23:25.972579 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 28 13:23:26 crc kubenswrapper[4857]: I1128 13:23:26.161599 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 28 13:23:26 crc kubenswrapper[4857]: I1128 13:23:26.210707 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 28 13:23:26 crc kubenswrapper[4857]: I1128 13:23:26.323212 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 28 13:23:26 crc kubenswrapper[4857]: I1128 13:23:26.433285 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 28 13:23:26 crc kubenswrapper[4857]: I1128 13:23:26.625096 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 28 13:23:26 crc kubenswrapper[4857]: I1128 13:23:26.719022 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 28 13:23:26 crc kubenswrapper[4857]: I1128 13:23:26.720951 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 28 13:23:26 crc kubenswrapper[4857]: I1128 13:23:26.846701 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 28 13:23:26 crc kubenswrapper[4857]: I1128 13:23:26.913189 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 28 13:23:26 crc kubenswrapper[4857]: I1128 13:23:26.990175 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 28 13:23:27 crc kubenswrapper[4857]: I1128 13:23:27.102244 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 28 13:23:27 crc kubenswrapper[4857]: I1128 13:23:27.225072 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 28 13:23:27 crc kubenswrapper[4857]: I1128 13:23:27.257543 4857 reflector.go:368] 
Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 28 13:23:27 crc kubenswrapper[4857]: I1128 13:23:27.263350 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 28 13:23:27 crc kubenswrapper[4857]: I1128 13:23:27.299880 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 28 13:23:27 crc kubenswrapper[4857]: I1128 13:23:27.539697 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 28 13:23:27 crc kubenswrapper[4857]: I1128 13:23:27.623165 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 28 13:23:27 crc kubenswrapper[4857]: I1128 13:23:27.634985 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 28 13:23:27 crc kubenswrapper[4857]: I1128 13:23:27.671352 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 28 13:23:27 crc kubenswrapper[4857]: I1128 13:23:27.836209 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 28 13:23:27 crc kubenswrapper[4857]: I1128 13:23:27.940870 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 28 13:23:28 crc kubenswrapper[4857]: I1128 13:23:28.079901 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 28 13:23:28 crc kubenswrapper[4857]: I1128 13:23:28.136341 4857 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Nov 28 13:23:28 crc kubenswrapper[4857]: I1128 13:23:28.154224 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 28 13:23:28 crc kubenswrapper[4857]: I1128 13:23:28.242887 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 28 13:23:28 crc kubenswrapper[4857]: I1128 13:23:28.310484 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 28 13:23:28 crc kubenswrapper[4857]: I1128 13:23:28.470790 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 28 13:23:28 crc kubenswrapper[4857]: I1128 13:23:28.508570 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 28 13:23:28 crc kubenswrapper[4857]: I1128 13:23:28.532615 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 28 13:23:28 crc kubenswrapper[4857]: I1128 13:23:28.655725 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 28 13:23:28 crc kubenswrapper[4857]: I1128 13:23:28.883287 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 28 13:23:28 crc kubenswrapper[4857]: I1128 
13:23:28.970804 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 28 13:23:29 crc kubenswrapper[4857]: I1128 13:23:29.088813 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 28 13:23:29 crc kubenswrapper[4857]: I1128 13:23:29.146088 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 28 13:23:29 crc kubenswrapper[4857]: I1128 13:23:29.311365 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 28 13:23:29 crc kubenswrapper[4857]: I1128 13:23:29.569386 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 28 13:23:29 crc kubenswrapper[4857]: I1128 13:23:29.571604 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 28 13:23:29 crc kubenswrapper[4857]: I1128 13:23:29.675640 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj"] Nov 28 13:23:29 crc kubenswrapper[4857]: I1128 13:23:29.737630 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 28 13:23:29 crc kubenswrapper[4857]: I1128 13:23:29.779246 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 28 13:23:29 crc kubenswrapper[4857]: I1128 13:23:29.823851 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 28 13:23:29 crc kubenswrapper[4857]: I1128 13:23:29.896432 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj"] Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.052158 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.195929 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.353575 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.393528 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.560967 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.561046 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.760698 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.760874 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.761250 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.761379 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.761419 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.761509 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.761504 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.761562 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.761783 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.762021 4857 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.762046 4857 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.762056 4857 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.762065 4857 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.771102 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.862969 4857 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.883917 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" event={"ID":"3c8b459d-f9f5-4df6-8abb-5bebcf338849","Type":"ContainerStarted","Data":"58ade90e5efc96d9462c2f15c41735ad2f73d0c387f1bef9a7418732b604f23b"} Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.884006 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" event={"ID":"3c8b459d-f9f5-4df6-8abb-5bebcf338849","Type":"ContainerStarted","Data":"99f6e1bd7c51f19dc9bfb2ef09e1e2f0769716658744d85cda0a5abfe43acd12"} Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.886515 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.886564 4857 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="dbe33186017c978b49351c697dff50f51b6f5341963e46e5d7c68bf3ed7de188" exitCode=137 Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.886608 4857 scope.go:117] "RemoveContainer" containerID="dbe33186017c978b49351c697dff50f51b6f5341963e46e5d7c68bf3ed7de188" Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.886690 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.922462 4857 scope.go:117] "RemoveContainer" containerID="dbe33186017c978b49351c697dff50f51b6f5341963e46e5d7c68bf3ed7de188" Nov 28 13:23:30 crc kubenswrapper[4857]: E1128 13:23:30.924235 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dbe33186017c978b49351c697dff50f51b6f5341963e46e5d7c68bf3ed7de188\": container with ID starting with dbe33186017c978b49351c697dff50f51b6f5341963e46e5d7c68bf3ed7de188 not found: ID does not exist" containerID="dbe33186017c978b49351c697dff50f51b6f5341963e46e5d7c68bf3ed7de188" Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.924294 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbe33186017c978b49351c697dff50f51b6f5341963e46e5d7c68bf3ed7de188"} err="failed to get container status \"dbe33186017c978b49351c697dff50f51b6f5341963e46e5d7c68bf3ed7de188\": rpc error: code = NotFound desc = could not find container \"dbe33186017c978b49351c697dff50f51b6f5341963e46e5d7c68bf3ed7de188\": container with ID starting with dbe33186017c978b49351c697dff50f51b6f5341963e46e5d7c68bf3ed7de188 not found: ID does not exist" Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.928869 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" podStartSLOduration=84.928850559 podStartE2EDuration="1m24.928850559s" podCreationTimestamp="2025-11-28 13:22:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:23:30.924814867 +0000 UTC m=+302.952190084" watchObservedRunningTime="2025-11-28 13:23:30.928850559 +0000 UTC m=+302.956225736" Nov 28 13:23:30 crc kubenswrapper[4857]: I1128 13:23:30.935079 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 28 13:23:31 crc kubenswrapper[4857]: I1128 13:23:31.015708 4857 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 28 13:23:31 crc kubenswrapper[4857]: I1128 13:23:31.530965 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 28 13:23:31 crc kubenswrapper[4857]: I1128 13:23:31.893273 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:31 crc kubenswrapper[4857]: I1128 13:23:31.899604 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-6b89b7dbcb-7v4fj" Nov 28 13:23:32 crc kubenswrapper[4857]: I1128 13:23:32.065433 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 28 13:23:32 crc kubenswrapper[4857]: I1128 13:23:32.241714 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 28 13:23:32 crc kubenswrapper[4857]: I1128 13:23:32.315375 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 28 13:23:32 crc kubenswrapper[4857]: I1128 13:23:32.315607 4857 mirror_client.go:130] "Deleting a mirror pod" 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Nov 28 13:23:32 crc kubenswrapper[4857]: I1128 13:23:32.326035 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 13:23:32 crc kubenswrapper[4857]: I1128 13:23:32.326074 4857 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="100300f6-680e-4cc7-b66c-d95f9ade082a" Nov 28 13:23:32 crc kubenswrapper[4857]: I1128 13:23:32.329241 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 13:23:32 crc kubenswrapper[4857]: I1128 13:23:32.329276 4857 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="100300f6-680e-4cc7-b66c-d95f9ade082a" Nov 28 13:23:32 crc kubenswrapper[4857]: I1128 13:23:32.477027 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 28 13:23:32 crc kubenswrapper[4857]: I1128 13:23:32.644704 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 28 13:23:54 crc kubenswrapper[4857]: I1128 13:23:54.030322 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Nov 28 13:23:54 crc kubenswrapper[4857]: I1128 13:23:54.033303 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 28 13:23:54 crc kubenswrapper[4857]: I1128 13:23:54.033352 4857 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="d799156dc236e23b108a76a2778dedbdd76cd63223c3cbda1a36615de1c454fe" exitCode=137 Nov 28 13:23:54 crc kubenswrapper[4857]: I1128 13:23:54.033403 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"d799156dc236e23b108a76a2778dedbdd76cd63223c3cbda1a36615de1c454fe"} Nov 28 13:23:54 crc kubenswrapper[4857]: I1128 13:23:54.033460 4857 scope.go:117] "RemoveContainer" containerID="3bf1a2c2f0573fb7e0f97f1e97c99a259b8489244245c30908aedef704ad09f3" Nov 28 13:23:55 crc kubenswrapper[4857]: I1128 13:23:55.045193 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Nov 28 13:23:55 crc kubenswrapper[4857]: I1128 13:23:55.046821 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"2474357a0c41bfcf0abf4ad53c60bb617e95c9c48cf58d4146306b4b5a55bded"} Nov 28 13:24:03 crc kubenswrapper[4857]: I1128 13:24:03.800419 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:24:03 crc kubenswrapper[4857]: I1128 13:24:03.814081 4857 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:24:04 crc kubenswrapper[4857]: I1128 13:24:04.109563 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:24:04 crc kubenswrapper[4857]: I1128 13:24:04.115605 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:24:14 crc kubenswrapper[4857]: I1128 13:24:14.681367 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zpqmp"] Nov 28 13:24:14 crc kubenswrapper[4857]: I1128 13:24:14.683261 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" podUID="e22d80c8-5706-475b-a385-00c22ad2eaea" containerName="controller-manager" containerID="cri-o://f7afc25e123b2ca66a05f051f57b6b708689ad93856277fafc46e8ef6856fe89" gracePeriod=30 Nov 28 13:24:14 crc kubenswrapper[4857]: I1128 13:24:14.685685 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb"] Nov 28 13:24:14 crc kubenswrapper[4857]: I1128 13:24:14.685930 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" podUID="a760fec5-b64d-43e2-bda1-aaaf4bd78a3c" containerName="route-controller-manager" containerID="cri-o://7770c9f648981f653774f4944176e0ad8d999d5135ba3e05ee61acdb97f02fcc" gracePeriod=30 Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.080109 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.086531 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.176668 4857 generic.go:334] "Generic (PLEG): container finished" podID="a760fec5-b64d-43e2-bda1-aaaf4bd78a3c" containerID="7770c9f648981f653774f4944176e0ad8d999d5135ba3e05ee61acdb97f02fcc" exitCode=0 Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.176729 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" event={"ID":"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c","Type":"ContainerDied","Data":"7770c9f648981f653774f4944176e0ad8d999d5135ba3e05ee61acdb97f02fcc"} Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.176799 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" event={"ID":"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c","Type":"ContainerDied","Data":"9361d4754365efa1937ddbc165fa9817dc5ba730d043bdb882ffa5e0527cb38d"} Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.176819 4857 scope.go:117] "RemoveContainer" containerID="7770c9f648981f653774f4944176e0ad8d999d5135ba3e05ee61acdb97f02fcc" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.176859 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.178564 4857 generic.go:334] "Generic (PLEG): container finished" podID="e22d80c8-5706-475b-a385-00c22ad2eaea" containerID="f7afc25e123b2ca66a05f051f57b6b708689ad93856277fafc46e8ef6856fe89" exitCode=0 Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.178592 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.178604 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" event={"ID":"e22d80c8-5706-475b-a385-00c22ad2eaea","Type":"ContainerDied","Data":"f7afc25e123b2ca66a05f051f57b6b708689ad93856277fafc46e8ef6856fe89"} Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.178703 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zpqmp" event={"ID":"e22d80c8-5706-475b-a385-00c22ad2eaea","Type":"ContainerDied","Data":"f7febd2edfcc5298e543a9427c7a308f305afb9bf5a6f7ac0e492f5d4370e544"} Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.193986 4857 scope.go:117] "RemoveContainer" containerID="7770c9f648981f653774f4944176e0ad8d999d5135ba3e05ee61acdb97f02fcc" Nov 28 13:24:15 crc kubenswrapper[4857]: E1128 13:24:15.194397 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7770c9f648981f653774f4944176e0ad8d999d5135ba3e05ee61acdb97f02fcc\": container with ID starting with 7770c9f648981f653774f4944176e0ad8d999d5135ba3e05ee61acdb97f02fcc not found: ID does not exist" containerID="7770c9f648981f653774f4944176e0ad8d999d5135ba3e05ee61acdb97f02fcc" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.194434 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7770c9f648981f653774f4944176e0ad8d999d5135ba3e05ee61acdb97f02fcc"} err="failed to get container status \"7770c9f648981f653774f4944176e0ad8d999d5135ba3e05ee61acdb97f02fcc\": rpc error: code = NotFound desc = could not find container \"7770c9f648981f653774f4944176e0ad8d999d5135ba3e05ee61acdb97f02fcc\": container with ID starting with 7770c9f648981f653774f4944176e0ad8d999d5135ba3e05ee61acdb97f02fcc not found: ID does not exist" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.194454 4857 scope.go:117] "RemoveContainer" containerID="f7afc25e123b2ca66a05f051f57b6b708689ad93856277fafc46e8ef6856fe89" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.212068 4857 scope.go:117] "RemoveContainer" containerID="f7afc25e123b2ca66a05f051f57b6b708689ad93856277fafc46e8ef6856fe89" Nov 28 13:24:15 crc kubenswrapper[4857]: E1128 13:24:15.212558 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f7afc25e123b2ca66a05f051f57b6b708689ad93856277fafc46e8ef6856fe89\": container with ID starting with f7afc25e123b2ca66a05f051f57b6b708689ad93856277fafc46e8ef6856fe89 not found: ID does not exist" containerID="f7afc25e123b2ca66a05f051f57b6b708689ad93856277fafc46e8ef6856fe89" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.212586 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f7afc25e123b2ca66a05f051f57b6b708689ad93856277fafc46e8ef6856fe89"} 
err="failed to get container status \"f7afc25e123b2ca66a05f051f57b6b708689ad93856277fafc46e8ef6856fe89\": rpc error: code = NotFound desc = could not find container \"f7afc25e123b2ca66a05f051f57b6b708689ad93856277fafc46e8ef6856fe89\": container with ID starting with f7afc25e123b2ca66a05f051f57b6b708689ad93856277fafc46e8ef6856fe89 not found: ID does not exist" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.247954 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e22d80c8-5706-475b-a385-00c22ad2eaea-config\") pod \"e22d80c8-5706-475b-a385-00c22ad2eaea\" (UID: \"e22d80c8-5706-475b-a385-00c22ad2eaea\") " Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.248027 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e22d80c8-5706-475b-a385-00c22ad2eaea-client-ca\") pod \"e22d80c8-5706-475b-a385-00c22ad2eaea\" (UID: \"e22d80c8-5706-475b-a385-00c22ad2eaea\") " Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.248068 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-config\") pod \"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c\" (UID: \"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c\") " Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.248109 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b485r\" (UniqueName: \"kubernetes.io/projected/e22d80c8-5706-475b-a385-00c22ad2eaea-kube-api-access-b485r\") pod \"e22d80c8-5706-475b-a385-00c22ad2eaea\" (UID: \"e22d80c8-5706-475b-a385-00c22ad2eaea\") " Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.248153 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e22d80c8-5706-475b-a385-00c22ad2eaea-proxy-ca-bundles\") pod \"e22d80c8-5706-475b-a385-00c22ad2eaea\" (UID: \"e22d80c8-5706-475b-a385-00c22ad2eaea\") " Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.248220 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-serving-cert\") pod \"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c\" (UID: \"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c\") " Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.248257 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-client-ca\") pod \"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c\" (UID: \"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c\") " Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.248304 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e22d80c8-5706-475b-a385-00c22ad2eaea-serving-cert\") pod \"e22d80c8-5706-475b-a385-00c22ad2eaea\" (UID: \"e22d80c8-5706-475b-a385-00c22ad2eaea\") " Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.248336 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rrl5j\" (UniqueName: \"kubernetes.io/projected/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-kube-api-access-rrl5j\") pod \"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c\" (UID: \"a760fec5-b64d-43e2-bda1-aaaf4bd78a3c\") " Nov 28 13:24:15 crc kubenswrapper[4857]: 
I1128 13:24:15.249183 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e22d80c8-5706-475b-a385-00c22ad2eaea-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "e22d80c8-5706-475b-a385-00c22ad2eaea" (UID: "e22d80c8-5706-475b-a385-00c22ad2eaea"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.249216 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e22d80c8-5706-475b-a385-00c22ad2eaea-client-ca" (OuterVolumeSpecName: "client-ca") pod "e22d80c8-5706-475b-a385-00c22ad2eaea" (UID: "e22d80c8-5706-475b-a385-00c22ad2eaea"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.249227 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-config" (OuterVolumeSpecName: "config") pod "a760fec5-b64d-43e2-bda1-aaaf4bd78a3c" (UID: "a760fec5-b64d-43e2-bda1-aaaf4bd78a3c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.249596 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-client-ca" (OuterVolumeSpecName: "client-ca") pod "a760fec5-b64d-43e2-bda1-aaaf4bd78a3c" (UID: "a760fec5-b64d-43e2-bda1-aaaf4bd78a3c"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.249701 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e22d80c8-5706-475b-a385-00c22ad2eaea-config" (OuterVolumeSpecName: "config") pod "e22d80c8-5706-475b-a385-00c22ad2eaea" (UID: "e22d80c8-5706-475b-a385-00c22ad2eaea"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.255011 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-kube-api-access-rrl5j" (OuterVolumeSpecName: "kube-api-access-rrl5j") pod "a760fec5-b64d-43e2-bda1-aaaf4bd78a3c" (UID: "a760fec5-b64d-43e2-bda1-aaaf4bd78a3c"). InnerVolumeSpecName "kube-api-access-rrl5j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.255056 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e22d80c8-5706-475b-a385-00c22ad2eaea-kube-api-access-b485r" (OuterVolumeSpecName: "kube-api-access-b485r") pod "e22d80c8-5706-475b-a385-00c22ad2eaea" (UID: "e22d80c8-5706-475b-a385-00c22ad2eaea"). InnerVolumeSpecName "kube-api-access-b485r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.255062 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "a760fec5-b64d-43e2-bda1-aaaf4bd78a3c" (UID: "a760fec5-b64d-43e2-bda1-aaaf4bd78a3c"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.255188 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e22d80c8-5706-475b-a385-00c22ad2eaea-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e22d80c8-5706-475b-a385-00c22ad2eaea" (UID: "e22d80c8-5706-475b-a385-00c22ad2eaea"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.349390 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.349423 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b485r\" (UniqueName: \"kubernetes.io/projected/e22d80c8-5706-475b-a385-00c22ad2eaea-kube-api-access-b485r\") on node \"crc\" DevicePath \"\"" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.349435 4857 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e22d80c8-5706-475b-a385-00c22ad2eaea-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.349443 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.349452 4857 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.349460 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e22d80c8-5706-475b-a385-00c22ad2eaea-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.349468 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rrl5j\" (UniqueName: \"kubernetes.io/projected/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c-kube-api-access-rrl5j\") on node \"crc\" DevicePath \"\"" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.349476 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e22d80c8-5706-475b-a385-00c22ad2eaea-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.349485 4857 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e22d80c8-5706-475b-a385-00c22ad2eaea-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.505523 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb"] Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.512283 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-fv2tb"] Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.524120 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zpqmp"] Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.529980 4857 kubelet.go:2431] "SyncLoop 
REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zpqmp"] Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.820408 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-bcb6ff979-jf6bj"] Nov 28 13:24:15 crc kubenswrapper[4857]: E1128 13:24:15.820894 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e22d80c8-5706-475b-a385-00c22ad2eaea" containerName="controller-manager" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.820906 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e22d80c8-5706-475b-a385-00c22ad2eaea" containerName="controller-manager" Nov 28 13:24:15 crc kubenswrapper[4857]: E1128 13:24:15.820914 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.820919 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 28 13:24:15 crc kubenswrapper[4857]: E1128 13:24:15.820929 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a760fec5-b64d-43e2-bda1-aaaf4bd78a3c" containerName="route-controller-manager" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.820934 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a760fec5-b64d-43e2-bda1-aaaf4bd78a3c" containerName="route-controller-manager" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.821030 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.821041 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a760fec5-b64d-43e2-bda1-aaaf4bd78a3c" containerName="route-controller-manager" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.821048 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e22d80c8-5706-475b-a385-00c22ad2eaea" containerName="controller-manager" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.821415 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.823641 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l"] Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.824127 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.824577 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.824648 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.824881 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.825914 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.826137 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.826342 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.826452 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.826470 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.826898 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.827130 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.827263 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.827986 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.834193 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-bcb6ff979-jf6bj"] Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.834517 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.838683 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l"] Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.957114 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/81d66c10-d120-411a-8ff9-46ffc607b67d-proxy-ca-bundles\") pod \"controller-manager-bcb6ff979-jf6bj\" (UID: \"81d66c10-d120-411a-8ff9-46ffc607b67d\") " pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.957175 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"client-ca\" (UniqueName: \"kubernetes.io/configmap/32e9d91a-703a-4f61-bcc5-bf59f465b22c-client-ca\") pod \"route-controller-manager-5cc86f69f5-l5c4l\" (UID: \"32e9d91a-703a-4f61-bcc5-bf59f465b22c\") " pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.957194 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81d66c10-d120-411a-8ff9-46ffc607b67d-config\") pod \"controller-manager-bcb6ff979-jf6bj\" (UID: \"81d66c10-d120-411a-8ff9-46ffc607b67d\") " pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.957219 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bhzg\" (UniqueName: \"kubernetes.io/projected/32e9d91a-703a-4f61-bcc5-bf59f465b22c-kube-api-access-4bhzg\") pod \"route-controller-manager-5cc86f69f5-l5c4l\" (UID: \"32e9d91a-703a-4f61-bcc5-bf59f465b22c\") " pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.957364 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxlw2\" (UniqueName: \"kubernetes.io/projected/81d66c10-d120-411a-8ff9-46ffc607b67d-kube-api-access-rxlw2\") pod \"controller-manager-bcb6ff979-jf6bj\" (UID: \"81d66c10-d120-411a-8ff9-46ffc607b67d\") " pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.957502 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/32e9d91a-703a-4f61-bcc5-bf59f465b22c-serving-cert\") pod \"route-controller-manager-5cc86f69f5-l5c4l\" (UID: \"32e9d91a-703a-4f61-bcc5-bf59f465b22c\") " pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.957545 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/81d66c10-d120-411a-8ff9-46ffc607b67d-client-ca\") pod \"controller-manager-bcb6ff979-jf6bj\" (UID: \"81d66c10-d120-411a-8ff9-46ffc607b67d\") " pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.957569 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/81d66c10-d120-411a-8ff9-46ffc607b67d-serving-cert\") pod \"controller-manager-bcb6ff979-jf6bj\" (UID: \"81d66c10-d120-411a-8ff9-46ffc607b67d\") " pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" Nov 28 13:24:15 crc kubenswrapper[4857]: I1128 13:24:15.957589 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/32e9d91a-703a-4f61-bcc5-bf59f465b22c-config\") pod \"route-controller-manager-5cc86f69f5-l5c4l\" (UID: \"32e9d91a-703a-4f61-bcc5-bf59f465b22c\") " pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.059102 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/32e9d91a-703a-4f61-bcc5-bf59f465b22c-serving-cert\") pod \"route-controller-manager-5cc86f69f5-l5c4l\" (UID: \"32e9d91a-703a-4f61-bcc5-bf59f465b22c\") " pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.059165 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/81d66c10-d120-411a-8ff9-46ffc607b67d-client-ca\") pod \"controller-manager-bcb6ff979-jf6bj\" (UID: \"81d66c10-d120-411a-8ff9-46ffc607b67d\") " pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.059188 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/81d66c10-d120-411a-8ff9-46ffc607b67d-serving-cert\") pod \"controller-manager-bcb6ff979-jf6bj\" (UID: \"81d66c10-d120-411a-8ff9-46ffc607b67d\") " pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.059205 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/32e9d91a-703a-4f61-bcc5-bf59f465b22c-config\") pod \"route-controller-manager-5cc86f69f5-l5c4l\" (UID: \"32e9d91a-703a-4f61-bcc5-bf59f465b22c\") " pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.059245 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/81d66c10-d120-411a-8ff9-46ffc607b67d-proxy-ca-bundles\") pod \"controller-manager-bcb6ff979-jf6bj\" (UID: \"81d66c10-d120-411a-8ff9-46ffc607b67d\") " pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.059291 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/32e9d91a-703a-4f61-bcc5-bf59f465b22c-client-ca\") pod \"route-controller-manager-5cc86f69f5-l5c4l\" (UID: \"32e9d91a-703a-4f61-bcc5-bf59f465b22c\") " pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.059314 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81d66c10-d120-411a-8ff9-46ffc607b67d-config\") pod \"controller-manager-bcb6ff979-jf6bj\" (UID: \"81d66c10-d120-411a-8ff9-46ffc607b67d\") " pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.059337 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bhzg\" (UniqueName: \"kubernetes.io/projected/32e9d91a-703a-4f61-bcc5-bf59f465b22c-kube-api-access-4bhzg\") pod \"route-controller-manager-5cc86f69f5-l5c4l\" (UID: \"32e9d91a-703a-4f61-bcc5-bf59f465b22c\") " pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.059392 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxlw2\" (UniqueName: \"kubernetes.io/projected/81d66c10-d120-411a-8ff9-46ffc607b67d-kube-api-access-rxlw2\") pod \"controller-manager-bcb6ff979-jf6bj\" (UID: 
\"81d66c10-d120-411a-8ff9-46ffc607b67d\") " pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.060702 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/81d66c10-d120-411a-8ff9-46ffc607b67d-proxy-ca-bundles\") pod \"controller-manager-bcb6ff979-jf6bj\" (UID: \"81d66c10-d120-411a-8ff9-46ffc607b67d\") " pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.060784 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/81d66c10-d120-411a-8ff9-46ffc607b67d-client-ca\") pod \"controller-manager-bcb6ff979-jf6bj\" (UID: \"81d66c10-d120-411a-8ff9-46ffc607b67d\") " pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.060971 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81d66c10-d120-411a-8ff9-46ffc607b67d-config\") pod \"controller-manager-bcb6ff979-jf6bj\" (UID: \"81d66c10-d120-411a-8ff9-46ffc607b67d\") " pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.061143 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/32e9d91a-703a-4f61-bcc5-bf59f465b22c-client-ca\") pod \"route-controller-manager-5cc86f69f5-l5c4l\" (UID: \"32e9d91a-703a-4f61-bcc5-bf59f465b22c\") " pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.061173 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/32e9d91a-703a-4f61-bcc5-bf59f465b22c-config\") pod \"route-controller-manager-5cc86f69f5-l5c4l\" (UID: \"32e9d91a-703a-4f61-bcc5-bf59f465b22c\") " pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.067262 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/32e9d91a-703a-4f61-bcc5-bf59f465b22c-serving-cert\") pod \"route-controller-manager-5cc86f69f5-l5c4l\" (UID: \"32e9d91a-703a-4f61-bcc5-bf59f465b22c\") " pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.070418 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/81d66c10-d120-411a-8ff9-46ffc607b67d-serving-cert\") pod \"controller-manager-bcb6ff979-jf6bj\" (UID: \"81d66c10-d120-411a-8ff9-46ffc607b67d\") " pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.102475 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bhzg\" (UniqueName: \"kubernetes.io/projected/32e9d91a-703a-4f61-bcc5-bf59f465b22c-kube-api-access-4bhzg\") pod \"route-controller-manager-5cc86f69f5-l5c4l\" (UID: \"32e9d91a-703a-4f61-bcc5-bf59f465b22c\") " pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.110842 4857 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rxlw2\" (UniqueName: \"kubernetes.io/projected/81d66c10-d120-411a-8ff9-46ffc607b67d-kube-api-access-rxlw2\") pod \"controller-manager-bcb6ff979-jf6bj\" (UID: \"81d66c10-d120-411a-8ff9-46ffc607b67d\") " pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.176343 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.181945 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.316146 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a760fec5-b64d-43e2-bda1-aaaf4bd78a3c" path="/var/lib/kubelet/pods/a760fec5-b64d-43e2-bda1-aaaf4bd78a3c/volumes" Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.316807 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e22d80c8-5706-475b-a385-00c22ad2eaea" path="/var/lib/kubelet/pods/e22d80c8-5706-475b-a385-00c22ad2eaea/volumes" Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.606027 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l"] Nov 28 13:24:16 crc kubenswrapper[4857]: W1128 13:24:16.609894 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod32e9d91a_703a_4f61_bcc5_bf59f465b22c.slice/crio-478307892de766ce370827ed296adae5b0f4f3ef792f5deb6897d10de1251076 WatchSource:0}: Error finding container 478307892de766ce370827ed296adae5b0f4f3ef792f5deb6897d10de1251076: Status 404 returned error can't find the container with id 478307892de766ce370827ed296adae5b0f4f3ef792f5deb6897d10de1251076 Nov 28 13:24:16 crc kubenswrapper[4857]: W1128 13:24:16.610226 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod81d66c10_d120_411a_8ff9_46ffc607b67d.slice/crio-5dd1a3a4a3a08e1aa50bf14ef5deea7eca7941be0d58d5d9373fae2c0d8201ba WatchSource:0}: Error finding container 5dd1a3a4a3a08e1aa50bf14ef5deea7eca7941be0d58d5d9373fae2c0d8201ba: Status 404 returned error can't find the container with id 5dd1a3a4a3a08e1aa50bf14ef5deea7eca7941be0d58d5d9373fae2c0d8201ba Nov 28 13:24:16 crc kubenswrapper[4857]: I1128 13:24:16.610365 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-bcb6ff979-jf6bj"] Nov 28 13:24:17 crc kubenswrapper[4857]: I1128 13:24:17.193148 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" event={"ID":"32e9d91a-703a-4f61-bcc5-bf59f465b22c","Type":"ContainerStarted","Data":"fb0a5b15884d6f86c5a1102bfb8950fdb16af17a40fc0c122f559b8a81c163ce"} Nov 28 13:24:17 crc kubenswrapper[4857]: I1128 13:24:17.193207 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" event={"ID":"32e9d91a-703a-4f61-bcc5-bf59f465b22c","Type":"ContainerStarted","Data":"478307892de766ce370827ed296adae5b0f4f3ef792f5deb6897d10de1251076"} Nov 28 13:24:17 crc kubenswrapper[4857]: I1128 13:24:17.194168 4857 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" Nov 28 13:24:17 crc kubenswrapper[4857]: I1128 13:24:17.195393 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" event={"ID":"81d66c10-d120-411a-8ff9-46ffc607b67d","Type":"ContainerStarted","Data":"1ac2f7f0331c8a846c2028cd9a627897cc47cd344b5076a9899b7e2e9246b57c"} Nov 28 13:24:17 crc kubenswrapper[4857]: I1128 13:24:17.195426 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" event={"ID":"81d66c10-d120-411a-8ff9-46ffc607b67d","Type":"ContainerStarted","Data":"5dd1a3a4a3a08e1aa50bf14ef5deea7eca7941be0d58d5d9373fae2c0d8201ba"} Nov 28 13:24:17 crc kubenswrapper[4857]: I1128 13:24:17.195969 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" Nov 28 13:24:17 crc kubenswrapper[4857]: I1128 13:24:17.199550 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" Nov 28 13:24:17 crc kubenswrapper[4857]: I1128 13:24:17.200136 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" Nov 28 13:24:17 crc kubenswrapper[4857]: I1128 13:24:17.210819 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" podStartSLOduration=3.210795616 podStartE2EDuration="3.210795616s" podCreationTimestamp="2025-11-28 13:24:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:24:17.206992052 +0000 UTC m=+349.234367219" watchObservedRunningTime="2025-11-28 13:24:17.210795616 +0000 UTC m=+349.238170783" Nov 28 13:24:17 crc kubenswrapper[4857]: I1128 13:24:17.225055 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" podStartSLOduration=3.225032246 podStartE2EDuration="3.225032246s" podCreationTimestamp="2025-11-28 13:24:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:24:17.219363805 +0000 UTC m=+349.246738972" watchObservedRunningTime="2025-11-28 13:24:17.225032246 +0000 UTC m=+349.252407413" Nov 28 13:24:33 crc kubenswrapper[4857]: I1128 13:24:33.178305 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:24:33 crc kubenswrapper[4857]: I1128 13:24:33.179120 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:24:54 crc kubenswrapper[4857]: I1128 13:24:54.635637 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-bcb6ff979-jf6bj"] 
Nov 28 13:24:54 crc kubenswrapper[4857]: I1128 13:24:54.636532 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" podUID="81d66c10-d120-411a-8ff9-46ffc607b67d" containerName="controller-manager" containerID="cri-o://1ac2f7f0331c8a846c2028cd9a627897cc47cd344b5076a9899b7e2e9246b57c" gracePeriod=30
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.108421 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.168689 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/81d66c10-d120-411a-8ff9-46ffc607b67d-proxy-ca-bundles\") pod \"81d66c10-d120-411a-8ff9-46ffc607b67d\" (UID: \"81d66c10-d120-411a-8ff9-46ffc607b67d\") "
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.168738 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/81d66c10-d120-411a-8ff9-46ffc607b67d-client-ca\") pod \"81d66c10-d120-411a-8ff9-46ffc607b67d\" (UID: \"81d66c10-d120-411a-8ff9-46ffc607b67d\") "
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.169477 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81d66c10-d120-411a-8ff9-46ffc607b67d-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "81d66c10-d120-411a-8ff9-46ffc607b67d" (UID: "81d66c10-d120-411a-8ff9-46ffc607b67d"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.169693 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81d66c10-d120-411a-8ff9-46ffc607b67d-client-ca" (OuterVolumeSpecName: "client-ca") pod "81d66c10-d120-411a-8ff9-46ffc607b67d" (UID: "81d66c10-d120-411a-8ff9-46ffc607b67d"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.169879 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxlw2\" (UniqueName: \"kubernetes.io/projected/81d66c10-d120-411a-8ff9-46ffc607b67d-kube-api-access-rxlw2\") pod \"81d66c10-d120-411a-8ff9-46ffc607b67d\" (UID: \"81d66c10-d120-411a-8ff9-46ffc607b67d\") "
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.170657 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81d66c10-d120-411a-8ff9-46ffc607b67d-config" (OuterVolumeSpecName: "config") pod "81d66c10-d120-411a-8ff9-46ffc607b67d" (UID: "81d66c10-d120-411a-8ff9-46ffc607b67d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.171116 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81d66c10-d120-411a-8ff9-46ffc607b67d-config\") pod \"81d66c10-d120-411a-8ff9-46ffc607b67d\" (UID: \"81d66c10-d120-411a-8ff9-46ffc607b67d\") "
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.171210 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/81d66c10-d120-411a-8ff9-46ffc607b67d-serving-cert\") pod \"81d66c10-d120-411a-8ff9-46ffc607b67d\" (UID: \"81d66c10-d120-411a-8ff9-46ffc607b67d\") "
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.171550 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81d66c10-d120-411a-8ff9-46ffc607b67d-config\") on node \"crc\" DevicePath \"\""
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.171601 4857 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/81d66c10-d120-411a-8ff9-46ffc607b67d-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.171617 4857 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/81d66c10-d120-411a-8ff9-46ffc607b67d-client-ca\") on node \"crc\" DevicePath \"\""
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.180940 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81d66c10-d120-411a-8ff9-46ffc607b67d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "81d66c10-d120-411a-8ff9-46ffc607b67d" (UID: "81d66c10-d120-411a-8ff9-46ffc607b67d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.181061 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81d66c10-d120-411a-8ff9-46ffc607b67d-kube-api-access-rxlw2" (OuterVolumeSpecName: "kube-api-access-rxlw2") pod "81d66c10-d120-411a-8ff9-46ffc607b67d" (UID: "81d66c10-d120-411a-8ff9-46ffc607b67d"). InnerVolumeSpecName "kube-api-access-rxlw2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.272220 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/81d66c10-d120-411a-8ff9-46ffc607b67d-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.272251 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxlw2\" (UniqueName: \"kubernetes.io/projected/81d66c10-d120-411a-8ff9-46ffc607b67d-kube-api-access-rxlw2\") on node \"crc\" DevicePath \"\""
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.397880 4857 generic.go:334] "Generic (PLEG): container finished" podID="81d66c10-d120-411a-8ff9-46ffc607b67d" containerID="1ac2f7f0331c8a846c2028cd9a627897cc47cd344b5076a9899b7e2e9246b57c" exitCode=0
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.397935 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" event={"ID":"81d66c10-d120-411a-8ff9-46ffc607b67d","Type":"ContainerDied","Data":"1ac2f7f0331c8a846c2028cd9a627897cc47cd344b5076a9899b7e2e9246b57c"}
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.397972 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj" event={"ID":"81d66c10-d120-411a-8ff9-46ffc607b67d","Type":"ContainerDied","Data":"5dd1a3a4a3a08e1aa50bf14ef5deea7eca7941be0d58d5d9373fae2c0d8201ba"}
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.398001 4857 scope.go:117] "RemoveContainer" containerID="1ac2f7f0331c8a846c2028cd9a627897cc47cd344b5076a9899b7e2e9246b57c"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.398156 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-bcb6ff979-jf6bj"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.431954 4857 scope.go:117] "RemoveContainer" containerID="1ac2f7f0331c8a846c2028cd9a627897cc47cd344b5076a9899b7e2e9246b57c"
Nov 28 13:24:55 crc kubenswrapper[4857]: E1128 13:24:55.432958 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ac2f7f0331c8a846c2028cd9a627897cc47cd344b5076a9899b7e2e9246b57c\": container with ID starting with 1ac2f7f0331c8a846c2028cd9a627897cc47cd344b5076a9899b7e2e9246b57c not found: ID does not exist" containerID="1ac2f7f0331c8a846c2028cd9a627897cc47cd344b5076a9899b7e2e9246b57c"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.433003 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ac2f7f0331c8a846c2028cd9a627897cc47cd344b5076a9899b7e2e9246b57c"} err="failed to get container status \"1ac2f7f0331c8a846c2028cd9a627897cc47cd344b5076a9899b7e2e9246b57c\": rpc error: code = NotFound desc = could not find container \"1ac2f7f0331c8a846c2028cd9a627897cc47cd344b5076a9899b7e2e9246b57c\": container with ID starting with 1ac2f7f0331c8a846c2028cd9a627897cc47cd344b5076a9899b7e2e9246b57c not found: ID does not exist"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.446022 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-bcb6ff979-jf6bj"]
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.454537 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-bcb6ff979-jf6bj"]
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.856286 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-74995b9b64-22dpv"]
Nov 28 13:24:55 crc kubenswrapper[4857]: E1128 13:24:55.857454 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81d66c10-d120-411a-8ff9-46ffc607b67d" containerName="controller-manager"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.857549 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="81d66c10-d120-411a-8ff9-46ffc607b67d" containerName="controller-manager"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.857782 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="81d66c10-d120-411a-8ff9-46ffc607b67d" containerName="controller-manager"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.858351 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-74995b9b64-22dpv"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.863559 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.863588 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.863783 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.863959 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.864548 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.864730 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.870054 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-74995b9b64-22dpv"]
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.873931 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.882254 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b822bc05-27a7-4073-bbd7-daffd78718f8-proxy-ca-bundles\") pod \"controller-manager-74995b9b64-22dpv\" (UID: \"b822bc05-27a7-4073-bbd7-daffd78718f8\") " pod="openshift-controller-manager/controller-manager-74995b9b64-22dpv"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.882288 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b822bc05-27a7-4073-bbd7-daffd78718f8-serving-cert\") pod \"controller-manager-74995b9b64-22dpv\" (UID: \"b822bc05-27a7-4073-bbd7-daffd78718f8\") " pod="openshift-controller-manager/controller-manager-74995b9b64-22dpv"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.882332 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b822bc05-27a7-4073-bbd7-daffd78718f8-config\") pod \"controller-manager-74995b9b64-22dpv\" (UID: \"b822bc05-27a7-4073-bbd7-daffd78718f8\") " pod="openshift-controller-manager/controller-manager-74995b9b64-22dpv"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.882357 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b822bc05-27a7-4073-bbd7-daffd78718f8-client-ca\") pod \"controller-manager-74995b9b64-22dpv\" (UID: \"b822bc05-27a7-4073-bbd7-daffd78718f8\") " pod="openshift-controller-manager/controller-manager-74995b9b64-22dpv"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.882371 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftsh6\" (UniqueName: \"kubernetes.io/projected/b822bc05-27a7-4073-bbd7-daffd78718f8-kube-api-access-ftsh6\") pod \"controller-manager-74995b9b64-22dpv\" (UID: \"b822bc05-27a7-4073-bbd7-daffd78718f8\") " pod="openshift-controller-manager/controller-manager-74995b9b64-22dpv"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.983857 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b822bc05-27a7-4073-bbd7-daffd78718f8-proxy-ca-bundles\") pod \"controller-manager-74995b9b64-22dpv\" (UID: \"b822bc05-27a7-4073-bbd7-daffd78718f8\") " pod="openshift-controller-manager/controller-manager-74995b9b64-22dpv"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.983919 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b822bc05-27a7-4073-bbd7-daffd78718f8-serving-cert\") pod \"controller-manager-74995b9b64-22dpv\" (UID: \"b822bc05-27a7-4073-bbd7-daffd78718f8\") " pod="openshift-controller-manager/controller-manager-74995b9b64-22dpv"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.983975 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b822bc05-27a7-4073-bbd7-daffd78718f8-config\") pod \"controller-manager-74995b9b64-22dpv\" (UID: \"b822bc05-27a7-4073-bbd7-daffd78718f8\") " pod="openshift-controller-manager/controller-manager-74995b9b64-22dpv"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.984007 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftsh6\" (UniqueName: \"kubernetes.io/projected/b822bc05-27a7-4073-bbd7-daffd78718f8-kube-api-access-ftsh6\") pod \"controller-manager-74995b9b64-22dpv\" (UID: \"b822bc05-27a7-4073-bbd7-daffd78718f8\") " pod="openshift-controller-manager/controller-manager-74995b9b64-22dpv"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.984026 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b822bc05-27a7-4073-bbd7-daffd78718f8-client-ca\") pod \"controller-manager-74995b9b64-22dpv\" (UID: \"b822bc05-27a7-4073-bbd7-daffd78718f8\") " pod="openshift-controller-manager/controller-manager-74995b9b64-22dpv"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.985142 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b822bc05-27a7-4073-bbd7-daffd78718f8-client-ca\") pod \"controller-manager-74995b9b64-22dpv\" (UID: \"b822bc05-27a7-4073-bbd7-daffd78718f8\") " pod="openshift-controller-manager/controller-manager-74995b9b64-22dpv"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.985992 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b822bc05-27a7-4073-bbd7-daffd78718f8-proxy-ca-bundles\") pod \"controller-manager-74995b9b64-22dpv\" (UID: \"b822bc05-27a7-4073-bbd7-daffd78718f8\") " pod="openshift-controller-manager/controller-manager-74995b9b64-22dpv"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.986631 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b822bc05-27a7-4073-bbd7-daffd78718f8-config\") pod \"controller-manager-74995b9b64-22dpv\" (UID: \"b822bc05-27a7-4073-bbd7-daffd78718f8\") " pod="openshift-controller-manager/controller-manager-74995b9b64-22dpv"
Nov 28 13:24:55 crc kubenswrapper[4857]: I1128 13:24:55.989526 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b822bc05-27a7-4073-bbd7-daffd78718f8-serving-cert\") pod \"controller-manager-74995b9b64-22dpv\" (UID: \"b822bc05-27a7-4073-bbd7-daffd78718f8\") " pod="openshift-controller-manager/controller-manager-74995b9b64-22dpv"
Nov 28 13:24:56 crc kubenswrapper[4857]: I1128 13:24:56.006884 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftsh6\" (UniqueName: \"kubernetes.io/projected/b822bc05-27a7-4073-bbd7-daffd78718f8-kube-api-access-ftsh6\") pod \"controller-manager-74995b9b64-22dpv\" (UID: \"b822bc05-27a7-4073-bbd7-daffd78718f8\") " pod="openshift-controller-manager/controller-manager-74995b9b64-22dpv"
Nov 28 13:24:56 crc kubenswrapper[4857]: I1128 13:24:56.183394 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-74995b9b64-22dpv"
Nov 28 13:24:56 crc kubenswrapper[4857]: I1128 13:24:56.318259 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81d66c10-d120-411a-8ff9-46ffc607b67d" path="/var/lib/kubelet/pods/81d66c10-d120-411a-8ff9-46ffc607b67d/volumes"
Nov 28 13:24:56 crc kubenswrapper[4857]: I1128 13:24:56.371631 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-74995b9b64-22dpv"]
Nov 28 13:24:56 crc kubenswrapper[4857]: I1128 13:24:56.416639 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-74995b9b64-22dpv" event={"ID":"b822bc05-27a7-4073-bbd7-daffd78718f8","Type":"ContainerStarted","Data":"72d2c6f4d13d4063a6221cc5ce4355b6159780b13c402ffe48932d89f5633091"}
Nov 28 13:24:57 crc kubenswrapper[4857]: I1128 13:24:57.425155 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-74995b9b64-22dpv" event={"ID":"b822bc05-27a7-4073-bbd7-daffd78718f8","Type":"ContainerStarted","Data":"af7c74ab573dde87dcb0b9431f30342385761e472175afe1c179df950efbb294"}
Nov 28 13:24:57 crc kubenswrapper[4857]: I1128 13:24:57.425547 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-74995b9b64-22dpv"
Nov 28 13:24:57 crc kubenswrapper[4857]: I1128 13:24:57.429565 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-74995b9b64-22dpv"
Nov 28 13:24:57 crc kubenswrapper[4857]: I1128 13:24:57.474294 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-74995b9b64-22dpv" podStartSLOduration=3.474277755 podStartE2EDuration="3.474277755s" podCreationTimestamp="2025-11-28 13:24:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:24:57.452139517 +0000 UTC m=+389.479514674" watchObservedRunningTime="2025-11-28 13:24:57.474277755 +0000 UTC m=+389.501652922"
Nov 28 13:24:59 crc kubenswrapper[4857]: I1128 13:24:59.520599 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-b678s"]
Nov 28 13:24:59 crc kubenswrapper[4857]: I1128 13:24:59.521575 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-b678s" podUID="fd1f9d7f-303b-4372-8937-0a7b31e45355" containerName="registry-server" containerID="cri-o://61ddf71dddee65074db13459715fe261575b723a7f61a6eb6159f13c0576576e" gracePeriod=30
Nov 28 13:24:59 crc kubenswrapper[4857]: I1128 13:24:59.526196 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-r2fq8"]
Nov 28 13:24:59 crc kubenswrapper[4857]: I1128 13:24:59.527103 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-r2fq8" podUID="28962db4-abc0-431e-832c-01246a09d048" containerName="registry-server" containerID="cri-o://d55cce3979b934f81fb0eba894e38b7ef2f36e552951331fd7b3fcb168e0f8a9" gracePeriod=30
Nov 28 13:24:59 crc kubenswrapper[4857]: I1128 13:24:59.537573 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-5jcnx"]
Nov 28 13:24:59 crc kubenswrapper[4857]: I1128 13:24:59.537838 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx" podUID="30934d71-ae7e-491a-933a-f1667b3608e4" containerName="marketplace-operator" containerID="cri-o://129fce64d5d4ac6365cba484402d8bec6af17369085f3a5147ca1f1d1462ca71" gracePeriod=30
Nov 28 13:24:59 crc kubenswrapper[4857]: I1128 13:24:59.543575 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fsv5j"]
Nov 28 13:24:59 crc kubenswrapper[4857]: I1128 13:24:59.543880 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fsv5j" podUID="fd3db365-db2a-4a0b-9485-bd38e8da6614" containerName="registry-server" containerID="cri-o://910e566d5eb148da2b48c1e9737b5084007dd5e7c0a27a8b95a646b7d5494877" gracePeriod=30
Nov 28 13:24:59 crc kubenswrapper[4857]: I1128 13:24:59.555180 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wxvfq"]
Nov 28 13:24:59 crc kubenswrapper[4857]: I1128 13:24:59.555507 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wxvfq" podUID="7f760076-0358-4b0e-9b50-0d3d05d29a0e" containerName="registry-server" containerID="cri-o://4549f3b8edb5c5e42a161320654b635cf675f0b4efdc1441970b0adf53041b9f" gracePeriod=30
Nov 28 13:24:59 crc kubenswrapper[4857]: I1128 13:24:59.557701 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bhdpz"]
Nov 28 13:24:59 crc kubenswrapper[4857]: I1128 13:24:59.558569 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-bhdpz"
Nov 28 13:24:59 crc kubenswrapper[4857]: I1128 13:24:59.573807 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bhdpz"]
Nov 28 13:24:59 crc kubenswrapper[4857]: I1128 13:24:59.635363 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/89b4b3ec-4394-4e95-9877-330c0613be93-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-bhdpz\" (UID: \"89b4b3ec-4394-4e95-9877-330c0613be93\") " pod="openshift-marketplace/marketplace-operator-79b997595-bhdpz"
Nov 28 13:24:59 crc kubenswrapper[4857]: I1128 13:24:59.635441 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lclf\" (UniqueName: \"kubernetes.io/projected/89b4b3ec-4394-4e95-9877-330c0613be93-kube-api-access-8lclf\") pod \"marketplace-operator-79b997595-bhdpz\" (UID: \"89b4b3ec-4394-4e95-9877-330c0613be93\") " pod="openshift-marketplace/marketplace-operator-79b997595-bhdpz"
Nov 28 13:24:59 crc kubenswrapper[4857]: I1128 13:24:59.635545 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/89b4b3ec-4394-4e95-9877-330c0613be93-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-bhdpz\" (UID: \"89b4b3ec-4394-4e95-9877-330c0613be93\") " pod="openshift-marketplace/marketplace-operator-79b997595-bhdpz"
Nov 28 13:24:59 crc kubenswrapper[4857]: I1128 13:24:59.738310 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lclf\" (UniqueName: \"kubernetes.io/projected/89b4b3ec-4394-4e95-9877-330c0613be93-kube-api-access-8lclf\") pod \"marketplace-operator-79b997595-bhdpz\" (UID: \"89b4b3ec-4394-4e95-9877-330c0613be93\") " pod="openshift-marketplace/marketplace-operator-79b997595-bhdpz"
Nov 28 13:24:59 crc kubenswrapper[4857]: I1128 13:24:59.738647 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/89b4b3ec-4394-4e95-9877-330c0613be93-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-bhdpz\" (UID: \"89b4b3ec-4394-4e95-9877-330c0613be93\") " pod="openshift-marketplace/marketplace-operator-79b997595-bhdpz"
Nov 28 13:24:59 crc kubenswrapper[4857]: I1128 13:24:59.738713 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/89b4b3ec-4394-4e95-9877-330c0613be93-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-bhdpz\" (UID: \"89b4b3ec-4394-4e95-9877-330c0613be93\") " pod="openshift-marketplace/marketplace-operator-79b997595-bhdpz"
Nov 28 13:24:59 crc kubenswrapper[4857]: I1128 13:24:59.739870 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/89b4b3ec-4394-4e95-9877-330c0613be93-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-bhdpz\" (UID: \"89b4b3ec-4394-4e95-9877-330c0613be93\") " pod="openshift-marketplace/marketplace-operator-79b997595-bhdpz"
Nov 28 13:24:59 crc kubenswrapper[4857]: I1128 13:24:59.745578 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/89b4b3ec-4394-4e95-9877-330c0613be93-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-bhdpz\" (UID: \"89b4b3ec-4394-4e95-9877-330c0613be93\") " pod="openshift-marketplace/marketplace-operator-79b997595-bhdpz"
Nov 28 13:24:59 crc kubenswrapper[4857]: I1128 13:24:59.773235 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lclf\" (UniqueName: \"kubernetes.io/projected/89b4b3ec-4394-4e95-9877-330c0613be93-kube-api-access-8lclf\") pod \"marketplace-operator-79b997595-bhdpz\" (UID: \"89b4b3ec-4394-4e95-9877-330c0613be93\") " pod="openshift-marketplace/marketplace-operator-79b997595-bhdpz"
Nov 28 13:24:59 crc kubenswrapper[4857]: I1128 13:24:59.878297 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-bhdpz"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.025297 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-b678s"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.112400 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-r2fq8"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.125402 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wxvfq"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.142897 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28962db4-abc0-431e-832c-01246a09d048-catalog-content\") pod \"28962db4-abc0-431e-832c-01246a09d048\" (UID: \"28962db4-abc0-431e-832c-01246a09d048\") "
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.143127 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd1f9d7f-303b-4372-8937-0a7b31e45355-catalog-content\") pod \"fd1f9d7f-303b-4372-8937-0a7b31e45355\" (UID: \"fd1f9d7f-303b-4372-8937-0a7b31e45355\") "
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.143162 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd1f9d7f-303b-4372-8937-0a7b31e45355-utilities\") pod \"fd1f9d7f-303b-4372-8937-0a7b31e45355\" (UID: \"fd1f9d7f-303b-4372-8937-0a7b31e45355\") "
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.143193 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28962db4-abc0-431e-832c-01246a09d048-utilities\") pod \"28962db4-abc0-431e-832c-01246a09d048\" (UID: \"28962db4-abc0-431e-832c-01246a09d048\") "
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.143234 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ph868\" (UniqueName: \"kubernetes.io/projected/fd1f9d7f-303b-4372-8937-0a7b31e45355-kube-api-access-ph868\") pod \"fd1f9d7f-303b-4372-8937-0a7b31e45355\" (UID: \"fd1f9d7f-303b-4372-8937-0a7b31e45355\") "
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.143294 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5dsks\" (UniqueName: \"kubernetes.io/projected/28962db4-abc0-431e-832c-01246a09d048-kube-api-access-5dsks\") pod \"28962db4-abc0-431e-832c-01246a09d048\" (UID: \"28962db4-abc0-431e-832c-01246a09d048\") "
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.143954 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd1f9d7f-303b-4372-8937-0a7b31e45355-utilities" (OuterVolumeSpecName: "utilities") pod "fd1f9d7f-303b-4372-8937-0a7b31e45355" (UID: "fd1f9d7f-303b-4372-8937-0a7b31e45355"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.144101 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28962db4-abc0-431e-832c-01246a09d048-utilities" (OuterVolumeSpecName: "utilities") pod "28962db4-abc0-431e-832c-01246a09d048" (UID: "28962db4-abc0-431e-832c-01246a09d048"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.148047 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28962db4-abc0-431e-832c-01246a09d048-kube-api-access-5dsks" (OuterVolumeSpecName: "kube-api-access-5dsks") pod "28962db4-abc0-431e-832c-01246a09d048" (UID: "28962db4-abc0-431e-832c-01246a09d048"). InnerVolumeSpecName "kube-api-access-5dsks". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.148064 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd1f9d7f-303b-4372-8937-0a7b31e45355-kube-api-access-ph868" (OuterVolumeSpecName: "kube-api-access-ph868") pod "fd1f9d7f-303b-4372-8937-0a7b31e45355" (UID: "fd1f9d7f-303b-4372-8937-0a7b31e45355"). InnerVolumeSpecName "kube-api-access-ph868". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.154036 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.173597 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fsv5j"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.216039 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28962db4-abc0-431e-832c-01246a09d048-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "28962db4-abc0-431e-832c-01246a09d048" (UID: "28962db4-abc0-431e-832c-01246a09d048"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.219392 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd1f9d7f-303b-4372-8937-0a7b31e45355-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fd1f9d7f-303b-4372-8937-0a7b31e45355" (UID: "fd1f9d7f-303b-4372-8937-0a7b31e45355"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.244464 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wkngh\" (UniqueName: \"kubernetes.io/projected/fd3db365-db2a-4a0b-9485-bd38e8da6614-kube-api-access-wkngh\") pod \"fd3db365-db2a-4a0b-9485-bd38e8da6614\" (UID: \"fd3db365-db2a-4a0b-9485-bd38e8da6614\") "
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.244502 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd3db365-db2a-4a0b-9485-bd38e8da6614-utilities\") pod \"fd3db365-db2a-4a0b-9485-bd38e8da6614\" (UID: \"fd3db365-db2a-4a0b-9485-bd38e8da6614\") "
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.244526 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/30934d71-ae7e-491a-933a-f1667b3608e4-marketplace-operator-metrics\") pod \"30934d71-ae7e-491a-933a-f1667b3608e4\" (UID: \"30934d71-ae7e-491a-933a-f1667b3608e4\") "
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.244547 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8cz4z\" (UniqueName: \"kubernetes.io/projected/30934d71-ae7e-491a-933a-f1667b3608e4-kube-api-access-8cz4z\") pod \"30934d71-ae7e-491a-933a-f1667b3608e4\" (UID: \"30934d71-ae7e-491a-933a-f1667b3608e4\") "
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.244563 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f760076-0358-4b0e-9b50-0d3d05d29a0e-catalog-content\") pod \"7f760076-0358-4b0e-9b50-0d3d05d29a0e\" (UID: \"7f760076-0358-4b0e-9b50-0d3d05d29a0e\") "
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.244612 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f760076-0358-4b0e-9b50-0d3d05d29a0e-utilities\") pod \"7f760076-0358-4b0e-9b50-0d3d05d29a0e\" (UID: \"7f760076-0358-4b0e-9b50-0d3d05d29a0e\") "
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.244642 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2fj78\" (UniqueName: \"kubernetes.io/projected/7f760076-0358-4b0e-9b50-0d3d05d29a0e-kube-api-access-2fj78\") pod \"7f760076-0358-4b0e-9b50-0d3d05d29a0e\" (UID: \"7f760076-0358-4b0e-9b50-0d3d05d29a0e\") "
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.244667 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd3db365-db2a-4a0b-9485-bd38e8da6614-catalog-content\") pod \"fd3db365-db2a-4a0b-9485-bd38e8da6614\" (UID: \"fd3db365-db2a-4a0b-9485-bd38e8da6614\") "
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.245454 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/30934d71-ae7e-491a-933a-f1667b3608e4-marketplace-trusted-ca\") pod \"30934d71-ae7e-491a-933a-f1667b3608e4\" (UID: \"30934d71-ae7e-491a-933a-f1667b3608e4\") "
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.245613 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f760076-0358-4b0e-9b50-0d3d05d29a0e-utilities" (OuterVolumeSpecName: "utilities") pod "7f760076-0358-4b0e-9b50-0d3d05d29a0e" (UID: "7f760076-0358-4b0e-9b50-0d3d05d29a0e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.245802 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28962db4-abc0-431e-832c-01246a09d048-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.245818 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd1f9d7f-303b-4372-8937-0a7b31e45355-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.245827 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd1f9d7f-303b-4372-8937-0a7b31e45355-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.245836 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28962db4-abc0-431e-832c-01246a09d048-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.245844 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ph868\" (UniqueName: \"kubernetes.io/projected/fd1f9d7f-303b-4372-8937-0a7b31e45355-kube-api-access-ph868\") on node \"crc\" DevicePath \"\""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.245854 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5dsks\" (UniqueName: \"kubernetes.io/projected/28962db4-abc0-431e-832c-01246a09d048-kube-api-access-5dsks\") on node \"crc\" DevicePath \"\""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.246016 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/30934d71-ae7e-491a-933a-f1667b3608e4-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "30934d71-ae7e-491a-933a-f1667b3608e4" (UID: "30934d71-ae7e-491a-933a-f1667b3608e4"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.246163 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd3db365-db2a-4a0b-9485-bd38e8da6614-utilities" (OuterVolumeSpecName: "utilities") pod "fd3db365-db2a-4a0b-9485-bd38e8da6614" (UID: "fd3db365-db2a-4a0b-9485-bd38e8da6614"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.247673 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd3db365-db2a-4a0b-9485-bd38e8da6614-kube-api-access-wkngh" (OuterVolumeSpecName: "kube-api-access-wkngh") pod "fd3db365-db2a-4a0b-9485-bd38e8da6614" (UID: "fd3db365-db2a-4a0b-9485-bd38e8da6614"). InnerVolumeSpecName "kube-api-access-wkngh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.247844 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f760076-0358-4b0e-9b50-0d3d05d29a0e-kube-api-access-2fj78" (OuterVolumeSpecName: "kube-api-access-2fj78") pod "7f760076-0358-4b0e-9b50-0d3d05d29a0e" (UID: "7f760076-0358-4b0e-9b50-0d3d05d29a0e"). InnerVolumeSpecName "kube-api-access-2fj78". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.248259 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30934d71-ae7e-491a-933a-f1667b3608e4-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "30934d71-ae7e-491a-933a-f1667b3608e4" (UID: "30934d71-ae7e-491a-933a-f1667b3608e4"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.249676 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30934d71-ae7e-491a-933a-f1667b3608e4-kube-api-access-8cz4z" (OuterVolumeSpecName: "kube-api-access-8cz4z") pod "30934d71-ae7e-491a-933a-f1667b3608e4" (UID: "30934d71-ae7e-491a-933a-f1667b3608e4"). InnerVolumeSpecName "kube-api-access-8cz4z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.268938 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd3db365-db2a-4a0b-9485-bd38e8da6614-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fd3db365-db2a-4a0b-9485-bd38e8da6614" (UID: "fd3db365-db2a-4a0b-9485-bd38e8da6614"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.348455 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f760076-0358-4b0e-9b50-0d3d05d29a0e-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.348487 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2fj78\" (UniqueName: \"kubernetes.io/projected/7f760076-0358-4b0e-9b50-0d3d05d29a0e-kube-api-access-2fj78\") on node \"crc\" DevicePath \"\""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.348503 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd3db365-db2a-4a0b-9485-bd38e8da6614-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.348513 4857 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/30934d71-ae7e-491a-933a-f1667b3608e4-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.348523 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wkngh\" (UniqueName: \"kubernetes.io/projected/fd3db365-db2a-4a0b-9485-bd38e8da6614-kube-api-access-wkngh\") on node \"crc\" DevicePath \"\""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.348533 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd3db365-db2a-4a0b-9485-bd38e8da6614-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.348545 4857 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/30934d71-ae7e-491a-933a-f1667b3608e4-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.348554 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8cz4z\" (UniqueName: \"kubernetes.io/projected/30934d71-ae7e-491a-933a-f1667b3608e4-kube-api-access-8cz4z\") on node \"crc\" DevicePath \"\""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.359995 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f760076-0358-4b0e-9b50-0d3d05d29a0e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7f760076-0358-4b0e-9b50-0d3d05d29a0e" (UID: "7f760076-0358-4b0e-9b50-0d3d05d29a0e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.443385 4857 generic.go:334] "Generic (PLEG): container finished" podID="28962db4-abc0-431e-832c-01246a09d048" containerID="d55cce3979b934f81fb0eba894e38b7ef2f36e552951331fd7b3fcb168e0f8a9" exitCode=0
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.443451 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r2fq8" event={"ID":"28962db4-abc0-431e-832c-01246a09d048","Type":"ContainerDied","Data":"d55cce3979b934f81fb0eba894e38b7ef2f36e552951331fd7b3fcb168e0f8a9"}
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.443484 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-r2fq8"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.443490 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r2fq8" event={"ID":"28962db4-abc0-431e-832c-01246a09d048","Type":"ContainerDied","Data":"50cf0d8944057c5457a9daaa7a90efe605ae15f7e5bcaee6f673b2f4441eac31"}
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.443503 4857 scope.go:117] "RemoveContainer" containerID="d55cce3979b934f81fb0eba894e38b7ef2f36e552951331fd7b3fcb168e0f8a9"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.446937 4857 generic.go:334] "Generic (PLEG): container finished" podID="7f760076-0358-4b0e-9b50-0d3d05d29a0e" containerID="4549f3b8edb5c5e42a161320654b635cf675f0b4efdc1441970b0adf53041b9f" exitCode=0
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.447033 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wxvfq" event={"ID":"7f760076-0358-4b0e-9b50-0d3d05d29a0e","Type":"ContainerDied","Data":"4549f3b8edb5c5e42a161320654b635cf675f0b4efdc1441970b0adf53041b9f"}
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.447089 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wxvfq" event={"ID":"7f760076-0358-4b0e-9b50-0d3d05d29a0e","Type":"ContainerDied","Data":"325d487328be1cc2bf55a055253658f31a551b877d6704ee85b76eaa925093b6"}
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.447190 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wxvfq"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.448875 4857 generic.go:334] "Generic (PLEG): container finished" podID="30934d71-ae7e-491a-933a-f1667b3608e4" containerID="129fce64d5d4ac6365cba484402d8bec6af17369085f3a5147ca1f1d1462ca71" exitCode=0
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.448966 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx" event={"ID":"30934d71-ae7e-491a-933a-f1667b3608e4","Type":"ContainerDied","Data":"129fce64d5d4ac6365cba484402d8bec6af17369085f3a5147ca1f1d1462ca71"}
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.449023 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx" event={"ID":"30934d71-ae7e-491a-933a-f1667b3608e4","Type":"ContainerDied","Data":"b81aa615ad4190d371ac5c03599170c68e75ce4d7a084f34a986ad8a770da707"}
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.449111 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-5jcnx"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.449423 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f760076-0358-4b0e-9b50-0d3d05d29a0e-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.453400 4857 generic.go:334] "Generic (PLEG): container finished" podID="fd1f9d7f-303b-4372-8937-0a7b31e45355" containerID="61ddf71dddee65074db13459715fe261575b723a7f61a6eb6159f13c0576576e" exitCode=0
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.453456 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b678s" event={"ID":"fd1f9d7f-303b-4372-8937-0a7b31e45355","Type":"ContainerDied","Data":"61ddf71dddee65074db13459715fe261575b723a7f61a6eb6159f13c0576576e"}
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.453480 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b678s" event={"ID":"fd1f9d7f-303b-4372-8937-0a7b31e45355","Type":"ContainerDied","Data":"1c386439f77d9ed57c84302df4dd5942884675de400f092ce814d479f72e8cd8"}
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.453542 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-b678s"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.459677 4857 generic.go:334] "Generic (PLEG): container finished" podID="fd3db365-db2a-4a0b-9485-bd38e8da6614" containerID="910e566d5eb148da2b48c1e9737b5084007dd5e7c0a27a8b95a646b7d5494877" exitCode=0
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.459714 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fsv5j" event={"ID":"fd3db365-db2a-4a0b-9485-bd38e8da6614","Type":"ContainerDied","Data":"910e566d5eb148da2b48c1e9737b5084007dd5e7c0a27a8b95a646b7d5494877"}
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.459743 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fsv5j" event={"ID":"fd3db365-db2a-4a0b-9485-bd38e8da6614","Type":"ContainerDied","Data":"e5c9816c462b4de8223ab2a3f5344f939e9212b1e587755d3ec821c80aa3a911"}
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.459799 4857 scope.go:117] "RemoveContainer" containerID="a59abcc27d2ccfb35218ae24ff7f255734a53244847f37b47e4f71ef201c4893"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.459907 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fsv5j"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.471053 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bhdpz"]
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.478694 4857 scope.go:117] "RemoveContainer" containerID="9f0cfe541d98be84834e53201766d3a7fbc0c3557230adb129da9ec7b32e7bf0"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.481429 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-r2fq8"]
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.496711 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-r2fq8"]
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.506890 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-5jcnx"]
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.510061 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-5jcnx"]
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.517779 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-b678s"]
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.518380 4857 scope.go:117] "RemoveContainer" containerID="d55cce3979b934f81fb0eba894e38b7ef2f36e552951331fd7b3fcb168e0f8a9"
Nov 28 13:25:00 crc kubenswrapper[4857]: E1128 13:25:00.518830 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d55cce3979b934f81fb0eba894e38b7ef2f36e552951331fd7b3fcb168e0f8a9\": container with ID starting with d55cce3979b934f81fb0eba894e38b7ef2f36e552951331fd7b3fcb168e0f8a9 not found: ID does not exist" containerID="d55cce3979b934f81fb0eba894e38b7ef2f36e552951331fd7b3fcb168e0f8a9"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.518860 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d55cce3979b934f81fb0eba894e38b7ef2f36e552951331fd7b3fcb168e0f8a9"} err="failed to get container status \"d55cce3979b934f81fb0eba894e38b7ef2f36e552951331fd7b3fcb168e0f8a9\": rpc error: code = NotFound desc = could not find container \"d55cce3979b934f81fb0eba894e38b7ef2f36e552951331fd7b3fcb168e0f8a9\": container with ID starting with d55cce3979b934f81fb0eba894e38b7ef2f36e552951331fd7b3fcb168e0f8a9 not found: ID does not exist"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.518882 4857 scope.go:117] "RemoveContainer" containerID="a59abcc27d2ccfb35218ae24ff7f255734a53244847f37b47e4f71ef201c4893"
Nov 28 13:25:00 crc kubenswrapper[4857]: E1128 13:25:00.519253 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a59abcc27d2ccfb35218ae24ff7f255734a53244847f37b47e4f71ef201c4893\": container with ID starting with a59abcc27d2ccfb35218ae24ff7f255734a53244847f37b47e4f71ef201c4893 not found: ID does not exist" containerID="a59abcc27d2ccfb35218ae24ff7f255734a53244847f37b47e4f71ef201c4893"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.519276 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a59abcc27d2ccfb35218ae24ff7f255734a53244847f37b47e4f71ef201c4893"} err="failed to get container status \"a59abcc27d2ccfb35218ae24ff7f255734a53244847f37b47e4f71ef201c4893\": rpc error: code = NotFound desc = could not find container \"a59abcc27d2ccfb35218ae24ff7f255734a53244847f37b47e4f71ef201c4893\": container with ID starting with a59abcc27d2ccfb35218ae24ff7f255734a53244847f37b47e4f71ef201c4893 not found: ID does not exist"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.519293 4857 scope.go:117] "RemoveContainer" containerID="9f0cfe541d98be84834e53201766d3a7fbc0c3557230adb129da9ec7b32e7bf0"
Nov 28 13:25:00 crc kubenswrapper[4857]: E1128 13:25:00.519572 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f0cfe541d98be84834e53201766d3a7fbc0c3557230adb129da9ec7b32e7bf0\": container with ID starting with 9f0cfe541d98be84834e53201766d3a7fbc0c3557230adb129da9ec7b32e7bf0 not found: ID does not exist" containerID="9f0cfe541d98be84834e53201766d3a7fbc0c3557230adb129da9ec7b32e7bf0"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.519598 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f0cfe541d98be84834e53201766d3a7fbc0c3557230adb129da9ec7b32e7bf0"} err="failed to get container status \"9f0cfe541d98be84834e53201766d3a7fbc0c3557230adb129da9ec7b32e7bf0\": rpc error: code = NotFound desc = could not find container \"9f0cfe541d98be84834e53201766d3a7fbc0c3557230adb129da9ec7b32e7bf0\": container with ID starting with 9f0cfe541d98be84834e53201766d3a7fbc0c3557230adb129da9ec7b32e7bf0 not found: ID does not exist"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.519615 4857 scope.go:117] "RemoveContainer" containerID="4549f3b8edb5c5e42a161320654b635cf675f0b4efdc1441970b0adf53041b9f"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.521721 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-b678s"]
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.529603 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wxvfq"]
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.532546 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wxvfq"]
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.535230 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fsv5j"]
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.536074 4857 scope.go:117] "RemoveContainer" containerID="c130f9082d669e254909d53a0287d9ad2b78c4b457e788a5df983dea6c2d813d"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.538239 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fsv5j"]
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.552649 4857 scope.go:117] "RemoveContainer" containerID="13dd25304dd06595a8c15426903d9104adfb8e838e57bdeb028cfa5ee7828793"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.571941 4857 scope.go:117] "RemoveContainer" containerID="4549f3b8edb5c5e42a161320654b635cf675f0b4efdc1441970b0adf53041b9f"
Nov 28 13:25:00 crc kubenswrapper[4857]: E1128 13:25:00.572292 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4549f3b8edb5c5e42a161320654b635cf675f0b4efdc1441970b0adf53041b9f\": container with ID starting with 4549f3b8edb5c5e42a161320654b635cf675f0b4efdc1441970b0adf53041b9f not found: ID does not exist" containerID="4549f3b8edb5c5e42a161320654b635cf675f0b4efdc1441970b0adf53041b9f"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.572320 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4549f3b8edb5c5e42a161320654b635cf675f0b4efdc1441970b0adf53041b9f"} err="failed to get container status \"4549f3b8edb5c5e42a161320654b635cf675f0b4efdc1441970b0adf53041b9f\": rpc error: code = NotFound desc = could not find container \"4549f3b8edb5c5e42a161320654b635cf675f0b4efdc1441970b0adf53041b9f\": container with ID starting with 4549f3b8edb5c5e42a161320654b635cf675f0b4efdc1441970b0adf53041b9f not found: ID does not exist"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.572340 4857 scope.go:117] "RemoveContainer" containerID="c130f9082d669e254909d53a0287d9ad2b78c4b457e788a5df983dea6c2d813d"
Nov 28 13:25:00 crc kubenswrapper[4857]: E1128 13:25:00.573053 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c130f9082d669e254909d53a0287d9ad2b78c4b457e788a5df983dea6c2d813d\": container with ID starting with c130f9082d669e254909d53a0287d9ad2b78c4b457e788a5df983dea6c2d813d not found: ID does not exist" containerID="c130f9082d669e254909d53a0287d9ad2b78c4b457e788a5df983dea6c2d813d"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.573127 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c130f9082d669e254909d53a0287d9ad2b78c4b457e788a5df983dea6c2d813d"} err="failed to get container status \"c130f9082d669e254909d53a0287d9ad2b78c4b457e788a5df983dea6c2d813d\": rpc error: code = NotFound desc = could not find container \"c130f9082d669e254909d53a0287d9ad2b78c4b457e788a5df983dea6c2d813d\": container with ID starting with c130f9082d669e254909d53a0287d9ad2b78c4b457e788a5df983dea6c2d813d not found: ID does not exist"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.573152 4857 scope.go:117] "RemoveContainer" containerID="13dd25304dd06595a8c15426903d9104adfb8e838e57bdeb028cfa5ee7828793"
Nov 28 13:25:00 crc kubenswrapper[4857]: E1128 13:25:00.573603 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13dd25304dd06595a8c15426903d9104adfb8e838e57bdeb028cfa5ee7828793\": container with ID starting with 13dd25304dd06595a8c15426903d9104adfb8e838e57bdeb028cfa5ee7828793 not found: ID does not exist" containerID="13dd25304dd06595a8c15426903d9104adfb8e838e57bdeb028cfa5ee7828793"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.573626 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13dd25304dd06595a8c15426903d9104adfb8e838e57bdeb028cfa5ee7828793"} err="failed to get container status \"13dd25304dd06595a8c15426903d9104adfb8e838e57bdeb028cfa5ee7828793\": rpc error: code = NotFound desc = could not find container \"13dd25304dd06595a8c15426903d9104adfb8e838e57bdeb028cfa5ee7828793\": container with ID starting with 13dd25304dd06595a8c15426903d9104adfb8e838e57bdeb028cfa5ee7828793 not found: ID does not exist"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.573641 4857 scope.go:117] "RemoveContainer" containerID="129fce64d5d4ac6365cba484402d8bec6af17369085f3a5147ca1f1d1462ca71"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.585662 4857 scope.go:117] "RemoveContainer" containerID="129fce64d5d4ac6365cba484402d8bec6af17369085f3a5147ca1f1d1462ca71"
Nov 28 13:25:00 crc kubenswrapper[4857]: E1128 13:25:00.586150 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"129fce64d5d4ac6365cba484402d8bec6af17369085f3a5147ca1f1d1462ca71\": container with ID starting with 129fce64d5d4ac6365cba484402d8bec6af17369085f3a5147ca1f1d1462ca71 not found: ID does not exist" containerID="129fce64d5d4ac6365cba484402d8bec6af17369085f3a5147ca1f1d1462ca71"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.586189 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"129fce64d5d4ac6365cba484402d8bec6af17369085f3a5147ca1f1d1462ca71"} err="failed to get container status \"129fce64d5d4ac6365cba484402d8bec6af17369085f3a5147ca1f1d1462ca71\": rpc error: code = NotFound desc = could not find container \"129fce64d5d4ac6365cba484402d8bec6af17369085f3a5147ca1f1d1462ca71\": container with ID starting with 129fce64d5d4ac6365cba484402d8bec6af17369085f3a5147ca1f1d1462ca71 not found: ID does not exist"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.586234 4857 scope.go:117] "RemoveContainer" containerID="61ddf71dddee65074db13459715fe261575b723a7f61a6eb6159f13c0576576e"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.602207 4857 scope.go:117] "RemoveContainer" containerID="ac9a4344bfcb7847e265522f2a687dd54a9a0951ddd36b235f61b1241fbd6a6e"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.624652 4857 scope.go:117] "RemoveContainer" containerID="1dce511039c1734f457a25431f52922ed7dd42c1cbfd3370a4fd4c59dc384093"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.651871 4857 scope.go:117] "RemoveContainer" containerID="61ddf71dddee65074db13459715fe261575b723a7f61a6eb6159f13c0576576e"
Nov 28 13:25:00 crc kubenswrapper[4857]: E1128 13:25:00.652638 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61ddf71dddee65074db13459715fe261575b723a7f61a6eb6159f13c0576576e\": container with ID starting with 61ddf71dddee65074db13459715fe261575b723a7f61a6eb6159f13c0576576e not found: ID does not exist" containerID="61ddf71dddee65074db13459715fe261575b723a7f61a6eb6159f13c0576576e"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.653055 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61ddf71dddee65074db13459715fe261575b723a7f61a6eb6159f13c0576576e"} err="failed to get container status \"61ddf71dddee65074db13459715fe261575b723a7f61a6eb6159f13c0576576e\": rpc error: code = NotFound desc = could not find container \"61ddf71dddee65074db13459715fe261575b723a7f61a6eb6159f13c0576576e\": container with ID starting with 61ddf71dddee65074db13459715fe261575b723a7f61a6eb6159f13c0576576e not found: ID does not exist"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.653097 4857 scope.go:117] "RemoveContainer" containerID="ac9a4344bfcb7847e265522f2a687dd54a9a0951ddd36b235f61b1241fbd6a6e"
Nov 28 13:25:00 crc kubenswrapper[4857]: E1128 13:25:00.653531 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac9a4344bfcb7847e265522f2a687dd54a9a0951ddd36b235f61b1241fbd6a6e\": container with ID starting with ac9a4344bfcb7847e265522f2a687dd54a9a0951ddd36b235f61b1241fbd6a6e not found: ID does not exist" containerID="ac9a4344bfcb7847e265522f2a687dd54a9a0951ddd36b235f61b1241fbd6a6e"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.653564 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac9a4344bfcb7847e265522f2a687dd54a9a0951ddd36b235f61b1241fbd6a6e"} err="failed to get container status \"ac9a4344bfcb7847e265522f2a687dd54a9a0951ddd36b235f61b1241fbd6a6e\": rpc error: code = NotFound desc = could not find container \"ac9a4344bfcb7847e265522f2a687dd54a9a0951ddd36b235f61b1241fbd6a6e\": container with ID starting with ac9a4344bfcb7847e265522f2a687dd54a9a0951ddd36b235f61b1241fbd6a6e not found: ID does not exist"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.653585 4857 scope.go:117] "RemoveContainer" containerID="1dce511039c1734f457a25431f52922ed7dd42c1cbfd3370a4fd4c59dc384093"
Nov 28 13:25:00 crc kubenswrapper[4857]: E1128 13:25:00.653792 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1dce511039c1734f457a25431f52922ed7dd42c1cbfd3370a4fd4c59dc384093\": container with ID starting with 1dce511039c1734f457a25431f52922ed7dd42c1cbfd3370a4fd4c59dc384093 not found: ID does not exist" containerID="1dce511039c1734f457a25431f52922ed7dd42c1cbfd3370a4fd4c59dc384093"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.653809 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1dce511039c1734f457a25431f52922ed7dd42c1cbfd3370a4fd4c59dc384093"} err="failed to get container status \"1dce511039c1734f457a25431f52922ed7dd42c1cbfd3370a4fd4c59dc384093\": rpc error: code = NotFound desc = could not find container \"1dce511039c1734f457a25431f52922ed7dd42c1cbfd3370a4fd4c59dc384093\": container with ID starting with 1dce511039c1734f457a25431f52922ed7dd42c1cbfd3370a4fd4c59dc384093 not found: ID does not exist"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.653822 4857 scope.go:117] "RemoveContainer" containerID="910e566d5eb148da2b48c1e9737b5084007dd5e7c0a27a8b95a646b7d5494877"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.706705 4857 scope.go:117] "RemoveContainer" containerID="46e4b361d60e3ba86fb16195bc7b10cf3436e7968550aa498727abdcbc1e2f19"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.720843 4857 scope.go:117] "RemoveContainer" containerID="95bbb8e50a2ec7da2dbeec300e56fe9a39aea254e94d701d5bcfd6d99cd1cfe0"
Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.735514 4857
scope.go:117] "RemoveContainer" containerID="910e566d5eb148da2b48c1e9737b5084007dd5e7c0a27a8b95a646b7d5494877" Nov 28 13:25:00 crc kubenswrapper[4857]: E1128 13:25:00.735984 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"910e566d5eb148da2b48c1e9737b5084007dd5e7c0a27a8b95a646b7d5494877\": container with ID starting with 910e566d5eb148da2b48c1e9737b5084007dd5e7c0a27a8b95a646b7d5494877 not found: ID does not exist" containerID="910e566d5eb148da2b48c1e9737b5084007dd5e7c0a27a8b95a646b7d5494877" Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.736021 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"910e566d5eb148da2b48c1e9737b5084007dd5e7c0a27a8b95a646b7d5494877"} err="failed to get container status \"910e566d5eb148da2b48c1e9737b5084007dd5e7c0a27a8b95a646b7d5494877\": rpc error: code = NotFound desc = could not find container \"910e566d5eb148da2b48c1e9737b5084007dd5e7c0a27a8b95a646b7d5494877\": container with ID starting with 910e566d5eb148da2b48c1e9737b5084007dd5e7c0a27a8b95a646b7d5494877 not found: ID does not exist" Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.736045 4857 scope.go:117] "RemoveContainer" containerID="46e4b361d60e3ba86fb16195bc7b10cf3436e7968550aa498727abdcbc1e2f19" Nov 28 13:25:00 crc kubenswrapper[4857]: E1128 13:25:00.736506 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46e4b361d60e3ba86fb16195bc7b10cf3436e7968550aa498727abdcbc1e2f19\": container with ID starting with 46e4b361d60e3ba86fb16195bc7b10cf3436e7968550aa498727abdcbc1e2f19 not found: ID does not exist" containerID="46e4b361d60e3ba86fb16195bc7b10cf3436e7968550aa498727abdcbc1e2f19" Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.736588 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46e4b361d60e3ba86fb16195bc7b10cf3436e7968550aa498727abdcbc1e2f19"} err="failed to get container status \"46e4b361d60e3ba86fb16195bc7b10cf3436e7968550aa498727abdcbc1e2f19\": rpc error: code = NotFound desc = could not find container \"46e4b361d60e3ba86fb16195bc7b10cf3436e7968550aa498727abdcbc1e2f19\": container with ID starting with 46e4b361d60e3ba86fb16195bc7b10cf3436e7968550aa498727abdcbc1e2f19 not found: ID does not exist" Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.736733 4857 scope.go:117] "RemoveContainer" containerID="95bbb8e50a2ec7da2dbeec300e56fe9a39aea254e94d701d5bcfd6d99cd1cfe0" Nov 28 13:25:00 crc kubenswrapper[4857]: E1128 13:25:00.737092 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"95bbb8e50a2ec7da2dbeec300e56fe9a39aea254e94d701d5bcfd6d99cd1cfe0\": container with ID starting with 95bbb8e50a2ec7da2dbeec300e56fe9a39aea254e94d701d5bcfd6d99cd1cfe0 not found: ID does not exist" containerID="95bbb8e50a2ec7da2dbeec300e56fe9a39aea254e94d701d5bcfd6d99cd1cfe0" Nov 28 13:25:00 crc kubenswrapper[4857]: I1128 13:25:00.737133 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95bbb8e50a2ec7da2dbeec300e56fe9a39aea254e94d701d5bcfd6d99cd1cfe0"} err="failed to get container status \"95bbb8e50a2ec7da2dbeec300e56fe9a39aea254e94d701d5bcfd6d99cd1cfe0\": rpc error: code = NotFound desc = could not find container \"95bbb8e50a2ec7da2dbeec300e56fe9a39aea254e94d701d5bcfd6d99cd1cfe0\": container with ID starting with 
95bbb8e50a2ec7da2dbeec300e56fe9a39aea254e94d701d5bcfd6d99cd1cfe0 not found: ID does not exist" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.088655 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2pltp"] Nov 28 13:25:01 crc kubenswrapper[4857]: E1128 13:25:01.088909 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f760076-0358-4b0e-9b50-0d3d05d29a0e" containerName="registry-server" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.088926 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f760076-0358-4b0e-9b50-0d3d05d29a0e" containerName="registry-server" Nov 28 13:25:01 crc kubenswrapper[4857]: E1128 13:25:01.088939 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd3db365-db2a-4a0b-9485-bd38e8da6614" containerName="registry-server" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.088947 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd3db365-db2a-4a0b-9485-bd38e8da6614" containerName="registry-server" Nov 28 13:25:01 crc kubenswrapper[4857]: E1128 13:25:01.088962 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f760076-0358-4b0e-9b50-0d3d05d29a0e" containerName="extract-content" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.088973 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f760076-0358-4b0e-9b50-0d3d05d29a0e" containerName="extract-content" Nov 28 13:25:01 crc kubenswrapper[4857]: E1128 13:25:01.088982 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd3db365-db2a-4a0b-9485-bd38e8da6614" containerName="extract-content" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.088990 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd3db365-db2a-4a0b-9485-bd38e8da6614" containerName="extract-content" Nov 28 13:25:01 crc kubenswrapper[4857]: E1128 13:25:01.089005 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28962db4-abc0-431e-832c-01246a09d048" containerName="registry-server" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.089013 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="28962db4-abc0-431e-832c-01246a09d048" containerName="registry-server" Nov 28 13:25:01 crc kubenswrapper[4857]: E1128 13:25:01.089025 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd1f9d7f-303b-4372-8937-0a7b31e45355" containerName="extract-content" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.089032 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd1f9d7f-303b-4372-8937-0a7b31e45355" containerName="extract-content" Nov 28 13:25:01 crc kubenswrapper[4857]: E1128 13:25:01.089042 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd1f9d7f-303b-4372-8937-0a7b31e45355" containerName="registry-server" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.089048 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd1f9d7f-303b-4372-8937-0a7b31e45355" containerName="registry-server" Nov 28 13:25:01 crc kubenswrapper[4857]: E1128 13:25:01.089056 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28962db4-abc0-431e-832c-01246a09d048" containerName="extract-content" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.089063 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="28962db4-abc0-431e-832c-01246a09d048" containerName="extract-content" Nov 28 13:25:01 crc kubenswrapper[4857]: E1128 13:25:01.089076 4857 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="fd1f9d7f-303b-4372-8937-0a7b31e45355" containerName="extract-utilities" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.089084 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd1f9d7f-303b-4372-8937-0a7b31e45355" containerName="extract-utilities" Nov 28 13:25:01 crc kubenswrapper[4857]: E1128 13:25:01.089094 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28962db4-abc0-431e-832c-01246a09d048" containerName="extract-utilities" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.089101 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="28962db4-abc0-431e-832c-01246a09d048" containerName="extract-utilities" Nov 28 13:25:01 crc kubenswrapper[4857]: E1128 13:25:01.089109 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd3db365-db2a-4a0b-9485-bd38e8da6614" containerName="extract-utilities" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.089117 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd3db365-db2a-4a0b-9485-bd38e8da6614" containerName="extract-utilities" Nov 28 13:25:01 crc kubenswrapper[4857]: E1128 13:25:01.089130 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30934d71-ae7e-491a-933a-f1667b3608e4" containerName="marketplace-operator" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.089137 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="30934d71-ae7e-491a-933a-f1667b3608e4" containerName="marketplace-operator" Nov 28 13:25:01 crc kubenswrapper[4857]: E1128 13:25:01.089147 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f760076-0358-4b0e-9b50-0d3d05d29a0e" containerName="extract-utilities" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.089154 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f760076-0358-4b0e-9b50-0d3d05d29a0e" containerName="extract-utilities" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.089271 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd1f9d7f-303b-4372-8937-0a7b31e45355" containerName="registry-server" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.089287 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="30934d71-ae7e-491a-933a-f1667b3608e4" containerName="marketplace-operator" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.089296 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="28962db4-abc0-431e-832c-01246a09d048" containerName="registry-server" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.089306 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f760076-0358-4b0e-9b50-0d3d05d29a0e" containerName="registry-server" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.089318 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd3db365-db2a-4a0b-9485-bd38e8da6614" containerName="registry-server" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.090181 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2pltp" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.092385 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.106458 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2pltp"] Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.158318 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhvbb\" (UniqueName: \"kubernetes.io/projected/87a26cb0-0ac0-44ca-8941-943e7e2bb155-kube-api-access-nhvbb\") pod \"redhat-marketplace-2pltp\" (UID: \"87a26cb0-0ac0-44ca-8941-943e7e2bb155\") " pod="openshift-marketplace/redhat-marketplace-2pltp" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.158382 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87a26cb0-0ac0-44ca-8941-943e7e2bb155-catalog-content\") pod \"redhat-marketplace-2pltp\" (UID: \"87a26cb0-0ac0-44ca-8941-943e7e2bb155\") " pod="openshift-marketplace/redhat-marketplace-2pltp" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.158484 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87a26cb0-0ac0-44ca-8941-943e7e2bb155-utilities\") pod \"redhat-marketplace-2pltp\" (UID: \"87a26cb0-0ac0-44ca-8941-943e7e2bb155\") " pod="openshift-marketplace/redhat-marketplace-2pltp" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.259807 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87a26cb0-0ac0-44ca-8941-943e7e2bb155-catalog-content\") pod \"redhat-marketplace-2pltp\" (UID: \"87a26cb0-0ac0-44ca-8941-943e7e2bb155\") " pod="openshift-marketplace/redhat-marketplace-2pltp" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.259886 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87a26cb0-0ac0-44ca-8941-943e7e2bb155-utilities\") pod \"redhat-marketplace-2pltp\" (UID: \"87a26cb0-0ac0-44ca-8941-943e7e2bb155\") " pod="openshift-marketplace/redhat-marketplace-2pltp" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.259940 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhvbb\" (UniqueName: \"kubernetes.io/projected/87a26cb0-0ac0-44ca-8941-943e7e2bb155-kube-api-access-nhvbb\") pod \"redhat-marketplace-2pltp\" (UID: \"87a26cb0-0ac0-44ca-8941-943e7e2bb155\") " pod="openshift-marketplace/redhat-marketplace-2pltp" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.260521 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87a26cb0-0ac0-44ca-8941-943e7e2bb155-catalog-content\") pod \"redhat-marketplace-2pltp\" (UID: \"87a26cb0-0ac0-44ca-8941-943e7e2bb155\") " pod="openshift-marketplace/redhat-marketplace-2pltp" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.260530 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87a26cb0-0ac0-44ca-8941-943e7e2bb155-utilities\") pod \"redhat-marketplace-2pltp\" (UID: 
\"87a26cb0-0ac0-44ca-8941-943e7e2bb155\") " pod="openshift-marketplace/redhat-marketplace-2pltp" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.278243 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhvbb\" (UniqueName: \"kubernetes.io/projected/87a26cb0-0ac0-44ca-8941-943e7e2bb155-kube-api-access-nhvbb\") pod \"redhat-marketplace-2pltp\" (UID: \"87a26cb0-0ac0-44ca-8941-943e7e2bb155\") " pod="openshift-marketplace/redhat-marketplace-2pltp" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.290334 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-s4dd7"] Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.292084 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-s4dd7" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.294511 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.298807 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-s4dd7"] Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.361056 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34b5fe72-977e-444a-94ab-5a135d1a3417-utilities\") pod \"redhat-operators-s4dd7\" (UID: \"34b5fe72-977e-444a-94ab-5a135d1a3417\") " pod="openshift-marketplace/redhat-operators-s4dd7" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.361143 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34b5fe72-977e-444a-94ab-5a135d1a3417-catalog-content\") pod \"redhat-operators-s4dd7\" (UID: \"34b5fe72-977e-444a-94ab-5a135d1a3417\") " pod="openshift-marketplace/redhat-operators-s4dd7" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.361287 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jgkr\" (UniqueName: \"kubernetes.io/projected/34b5fe72-977e-444a-94ab-5a135d1a3417-kube-api-access-5jgkr\") pod \"redhat-operators-s4dd7\" (UID: \"34b5fe72-977e-444a-94ab-5a135d1a3417\") " pod="openshift-marketplace/redhat-operators-s4dd7" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.403506 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2pltp" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.462885 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34b5fe72-977e-444a-94ab-5a135d1a3417-catalog-content\") pod \"redhat-operators-s4dd7\" (UID: \"34b5fe72-977e-444a-94ab-5a135d1a3417\") " pod="openshift-marketplace/redhat-operators-s4dd7" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.462958 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jgkr\" (UniqueName: \"kubernetes.io/projected/34b5fe72-977e-444a-94ab-5a135d1a3417-kube-api-access-5jgkr\") pod \"redhat-operators-s4dd7\" (UID: \"34b5fe72-977e-444a-94ab-5a135d1a3417\") " pod="openshift-marketplace/redhat-operators-s4dd7" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.462997 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34b5fe72-977e-444a-94ab-5a135d1a3417-utilities\") pod \"redhat-operators-s4dd7\" (UID: \"34b5fe72-977e-444a-94ab-5a135d1a3417\") " pod="openshift-marketplace/redhat-operators-s4dd7" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.463347 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34b5fe72-977e-444a-94ab-5a135d1a3417-catalog-content\") pod \"redhat-operators-s4dd7\" (UID: \"34b5fe72-977e-444a-94ab-5a135d1a3417\") " pod="openshift-marketplace/redhat-operators-s4dd7" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.463529 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34b5fe72-977e-444a-94ab-5a135d1a3417-utilities\") pod \"redhat-operators-s4dd7\" (UID: \"34b5fe72-977e-444a-94ab-5a135d1a3417\") " pod="openshift-marketplace/redhat-operators-s4dd7" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.477175 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-bhdpz" event={"ID":"89b4b3ec-4394-4e95-9877-330c0613be93","Type":"ContainerStarted","Data":"f2ad4e689bb71e96f73184241bd7763f28309b004ed7b471fcefe9b5cf631d65"} Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.477222 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-bhdpz" event={"ID":"89b4b3ec-4394-4e95-9877-330c0613be93","Type":"ContainerStarted","Data":"e42ce32e5997ca03821b35a1fafe2476c4214cd8c42aa51fd872922dfb2301f2"} Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.477594 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-bhdpz" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.482434 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5jgkr\" (UniqueName: \"kubernetes.io/projected/34b5fe72-977e-444a-94ab-5a135d1a3417-kube-api-access-5jgkr\") pod \"redhat-operators-s4dd7\" (UID: \"34b5fe72-977e-444a-94ab-5a135d1a3417\") " pod="openshift-marketplace/redhat-operators-s4dd7" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.482947 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-bhdpz" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.497060 4857 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-bhdpz" podStartSLOduration=2.497042899 podStartE2EDuration="2.497042899s" podCreationTimestamp="2025-11-28 13:24:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:25:01.495604589 +0000 UTC m=+393.522979766" watchObservedRunningTime="2025-11-28 13:25:01.497042899 +0000 UTC m=+393.524418066" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.611117 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-s4dd7" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.654988 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-qcsnc"] Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.655789 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.674234 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-qcsnc"] Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.766358 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/591610dd-3f95-40c0-9857-fb87cd5dea17-registry-tls\") pod \"image-registry-66df7c8f76-qcsnc\" (UID: \"591610dd-3f95-40c0-9857-fb87cd5dea17\") " pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.766410 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/591610dd-3f95-40c0-9857-fb87cd5dea17-ca-trust-extracted\") pod \"image-registry-66df7c8f76-qcsnc\" (UID: \"591610dd-3f95-40c0-9857-fb87cd5dea17\") " pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.766445 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-qcsnc\" (UID: \"591610dd-3f95-40c0-9857-fb87cd5dea17\") " pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.766482 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/591610dd-3f95-40c0-9857-fb87cd5dea17-registry-certificates\") pod \"image-registry-66df7c8f76-qcsnc\" (UID: \"591610dd-3f95-40c0-9857-fb87cd5dea17\") " pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.766513 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/591610dd-3f95-40c0-9857-fb87cd5dea17-installation-pull-secrets\") pod \"image-registry-66df7c8f76-qcsnc\" (UID: \"591610dd-3f95-40c0-9857-fb87cd5dea17\") " pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.766545 4857 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/591610dd-3f95-40c0-9857-fb87cd5dea17-trusted-ca\") pod \"image-registry-66df7c8f76-qcsnc\" (UID: \"591610dd-3f95-40c0-9857-fb87cd5dea17\") " pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.766560 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4mgz\" (UniqueName: \"kubernetes.io/projected/591610dd-3f95-40c0-9857-fb87cd5dea17-kube-api-access-l4mgz\") pod \"image-registry-66df7c8f76-qcsnc\" (UID: \"591610dd-3f95-40c0-9857-fb87cd5dea17\") " pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.766579 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/591610dd-3f95-40c0-9857-fb87cd5dea17-bound-sa-token\") pod \"image-registry-66df7c8f76-qcsnc\" (UID: \"591610dd-3f95-40c0-9857-fb87cd5dea17\") " pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.796228 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-qcsnc\" (UID: \"591610dd-3f95-40c0-9857-fb87cd5dea17\") " pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.829628 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2pltp"] Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.869009 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/591610dd-3f95-40c0-9857-fb87cd5dea17-registry-certificates\") pod \"image-registry-66df7c8f76-qcsnc\" (UID: \"591610dd-3f95-40c0-9857-fb87cd5dea17\") " pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.869081 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/591610dd-3f95-40c0-9857-fb87cd5dea17-installation-pull-secrets\") pod \"image-registry-66df7c8f76-qcsnc\" (UID: \"591610dd-3f95-40c0-9857-fb87cd5dea17\") " pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.869189 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4mgz\" (UniqueName: \"kubernetes.io/projected/591610dd-3f95-40c0-9857-fb87cd5dea17-kube-api-access-l4mgz\") pod \"image-registry-66df7c8f76-qcsnc\" (UID: \"591610dd-3f95-40c0-9857-fb87cd5dea17\") " pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.869244 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/591610dd-3f95-40c0-9857-fb87cd5dea17-trusted-ca\") pod \"image-registry-66df7c8f76-qcsnc\" (UID: \"591610dd-3f95-40c0-9857-fb87cd5dea17\") " pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:01 crc 
kubenswrapper[4857]: I1128 13:25:01.869277 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/591610dd-3f95-40c0-9857-fb87cd5dea17-bound-sa-token\") pod \"image-registry-66df7c8f76-qcsnc\" (UID: \"591610dd-3f95-40c0-9857-fb87cd5dea17\") " pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.869350 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/591610dd-3f95-40c0-9857-fb87cd5dea17-registry-tls\") pod \"image-registry-66df7c8f76-qcsnc\" (UID: \"591610dd-3f95-40c0-9857-fb87cd5dea17\") " pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.869380 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/591610dd-3f95-40c0-9857-fb87cd5dea17-ca-trust-extracted\") pod \"image-registry-66df7c8f76-qcsnc\" (UID: \"591610dd-3f95-40c0-9857-fb87cd5dea17\") " pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.870652 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/591610dd-3f95-40c0-9857-fb87cd5dea17-registry-certificates\") pod \"image-registry-66df7c8f76-qcsnc\" (UID: \"591610dd-3f95-40c0-9857-fb87cd5dea17\") " pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.870846 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/591610dd-3f95-40c0-9857-fb87cd5dea17-ca-trust-extracted\") pod \"image-registry-66df7c8f76-qcsnc\" (UID: \"591610dd-3f95-40c0-9857-fb87cd5dea17\") " pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.875937 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/591610dd-3f95-40c0-9857-fb87cd5dea17-trusted-ca\") pod \"image-registry-66df7c8f76-qcsnc\" (UID: \"591610dd-3f95-40c0-9857-fb87cd5dea17\") " pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.883362 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/591610dd-3f95-40c0-9857-fb87cd5dea17-registry-tls\") pod \"image-registry-66df7c8f76-qcsnc\" (UID: \"591610dd-3f95-40c0-9857-fb87cd5dea17\") " pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.883817 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/591610dd-3f95-40c0-9857-fb87cd5dea17-installation-pull-secrets\") pod \"image-registry-66df7c8f76-qcsnc\" (UID: \"591610dd-3f95-40c0-9857-fb87cd5dea17\") " pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.886043 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/591610dd-3f95-40c0-9857-fb87cd5dea17-bound-sa-token\") pod \"image-registry-66df7c8f76-qcsnc\" (UID: 
\"591610dd-3f95-40c0-9857-fb87cd5dea17\") " pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:01 crc kubenswrapper[4857]: I1128 13:25:01.888314 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4mgz\" (UniqueName: \"kubernetes.io/projected/591610dd-3f95-40c0-9857-fb87cd5dea17-kube-api-access-l4mgz\") pod \"image-registry-66df7c8f76-qcsnc\" (UID: \"591610dd-3f95-40c0-9857-fb87cd5dea17\") " pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:02 crc kubenswrapper[4857]: I1128 13:25:02.006327 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:02 crc kubenswrapper[4857]: I1128 13:25:02.077958 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-s4dd7"] Nov 28 13:25:02 crc kubenswrapper[4857]: W1128 13:25:02.080404 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod34b5fe72_977e_444a_94ab_5a135d1a3417.slice/crio-a05e4de83f3044f32762a6a05c3c41157d376c0e94a1ccd051300a7cb65182bb WatchSource:0}: Error finding container a05e4de83f3044f32762a6a05c3c41157d376c0e94a1ccd051300a7cb65182bb: Status 404 returned error can't find the container with id a05e4de83f3044f32762a6a05c3c41157d376c0e94a1ccd051300a7cb65182bb Nov 28 13:25:02 crc kubenswrapper[4857]: I1128 13:25:02.316404 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28962db4-abc0-431e-832c-01246a09d048" path="/var/lib/kubelet/pods/28962db4-abc0-431e-832c-01246a09d048/volumes" Nov 28 13:25:02 crc kubenswrapper[4857]: I1128 13:25:02.317698 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30934d71-ae7e-491a-933a-f1667b3608e4" path="/var/lib/kubelet/pods/30934d71-ae7e-491a-933a-f1667b3608e4/volumes" Nov 28 13:25:02 crc kubenswrapper[4857]: I1128 13:25:02.318306 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f760076-0358-4b0e-9b50-0d3d05d29a0e" path="/var/lib/kubelet/pods/7f760076-0358-4b0e-9b50-0d3d05d29a0e/volumes" Nov 28 13:25:02 crc kubenswrapper[4857]: I1128 13:25:02.320148 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd1f9d7f-303b-4372-8937-0a7b31e45355" path="/var/lib/kubelet/pods/fd1f9d7f-303b-4372-8937-0a7b31e45355/volumes" Nov 28 13:25:02 crc kubenswrapper[4857]: I1128 13:25:02.320949 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd3db365-db2a-4a0b-9485-bd38e8da6614" path="/var/lib/kubelet/pods/fd3db365-db2a-4a0b-9485-bd38e8da6614/volumes" Nov 28 13:25:02 crc kubenswrapper[4857]: I1128 13:25:02.428798 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-qcsnc"] Nov 28 13:25:02 crc kubenswrapper[4857]: W1128 13:25:02.440412 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod591610dd_3f95_40c0_9857_fb87cd5dea17.slice/crio-f4491788fc41ee29b4e9bfdc9364728566e0e373d2798b78f2fd46367a9f74bd WatchSource:0}: Error finding container f4491788fc41ee29b4e9bfdc9364728566e0e373d2798b78f2fd46367a9f74bd: Status 404 returned error can't find the container with id f4491788fc41ee29b4e9bfdc9364728566e0e373d2798b78f2fd46367a9f74bd Nov 28 13:25:02 crc kubenswrapper[4857]: I1128 13:25:02.484395 4857 generic.go:334] "Generic (PLEG): container finished" 
podID="87a26cb0-0ac0-44ca-8941-943e7e2bb155" containerID="bd01baab41c86d059a5874b767c3a33ab07cf45fd49961372ed78d2ca235714f" exitCode=0 Nov 28 13:25:02 crc kubenswrapper[4857]: I1128 13:25:02.484503 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2pltp" event={"ID":"87a26cb0-0ac0-44ca-8941-943e7e2bb155","Type":"ContainerDied","Data":"bd01baab41c86d059a5874b767c3a33ab07cf45fd49961372ed78d2ca235714f"} Nov 28 13:25:02 crc kubenswrapper[4857]: I1128 13:25:02.484628 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2pltp" event={"ID":"87a26cb0-0ac0-44ca-8941-943e7e2bb155","Type":"ContainerStarted","Data":"1021ee00cc8d226b9ecf015315765db4b10ba19e29faced04dbe101ceb615d56"} Nov 28 13:25:02 crc kubenswrapper[4857]: I1128 13:25:02.486476 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" event={"ID":"591610dd-3f95-40c0-9857-fb87cd5dea17","Type":"ContainerStarted","Data":"f4491788fc41ee29b4e9bfdc9364728566e0e373d2798b78f2fd46367a9f74bd"} Nov 28 13:25:02 crc kubenswrapper[4857]: I1128 13:25:02.489745 4857 generic.go:334] "Generic (PLEG): container finished" podID="34b5fe72-977e-444a-94ab-5a135d1a3417" containerID="128b0dd2e55373480dd78927cc7e3aa6557855d220f23718e269874d004f958a" exitCode=0 Nov 28 13:25:02 crc kubenswrapper[4857]: I1128 13:25:02.489889 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s4dd7" event={"ID":"34b5fe72-977e-444a-94ab-5a135d1a3417","Type":"ContainerDied","Data":"128b0dd2e55373480dd78927cc7e3aa6557855d220f23718e269874d004f958a"} Nov 28 13:25:02 crc kubenswrapper[4857]: I1128 13:25:02.489920 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s4dd7" event={"ID":"34b5fe72-977e-444a-94ab-5a135d1a3417","Type":"ContainerStarted","Data":"a05e4de83f3044f32762a6a05c3c41157d376c0e94a1ccd051300a7cb65182bb"} Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.178523 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.178892 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.487946 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-cl69z"] Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.489218 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cl69z" Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.491502 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.495090 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" event={"ID":"591610dd-3f95-40c0-9857-fb87cd5dea17","Type":"ContainerStarted","Data":"628c729705301f3d5523cecdffbae48e2989c40a4aa0d63173cdafd80b95e428"} Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.501588 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cl69z"] Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.535519 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" podStartSLOduration=2.53550402 podStartE2EDuration="2.53550402s" podCreationTimestamp="2025-11-28 13:25:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:25:03.534910893 +0000 UTC m=+395.562286080" watchObservedRunningTime="2025-11-28 13:25:03.53550402 +0000 UTC m=+395.562879187" Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.589443 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0178b5af-6910-4201-99c0-0053310327a0-utilities\") pod \"certified-operators-cl69z\" (UID: \"0178b5af-6910-4201-99c0-0053310327a0\") " pod="openshift-marketplace/certified-operators-cl69z" Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.589511 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h425t\" (UniqueName: \"kubernetes.io/projected/0178b5af-6910-4201-99c0-0053310327a0-kube-api-access-h425t\") pod \"certified-operators-cl69z\" (UID: \"0178b5af-6910-4201-99c0-0053310327a0\") " pod="openshift-marketplace/certified-operators-cl69z" Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.589570 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0178b5af-6910-4201-99c0-0053310327a0-catalog-content\") pod \"certified-operators-cl69z\" (UID: \"0178b5af-6910-4201-99c0-0053310327a0\") " pod="openshift-marketplace/certified-operators-cl69z" Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.690868 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0178b5af-6910-4201-99c0-0053310327a0-utilities\") pod \"certified-operators-cl69z\" (UID: \"0178b5af-6910-4201-99c0-0053310327a0\") " pod="openshift-marketplace/certified-operators-cl69z" Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.690927 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h425t\" (UniqueName: \"kubernetes.io/projected/0178b5af-6910-4201-99c0-0053310327a0-kube-api-access-h425t\") pod \"certified-operators-cl69z\" (UID: \"0178b5af-6910-4201-99c0-0053310327a0\") " pod="openshift-marketplace/certified-operators-cl69z" Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.690984 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0178b5af-6910-4201-99c0-0053310327a0-catalog-content\") pod \"certified-operators-cl69z\" (UID: \"0178b5af-6910-4201-99c0-0053310327a0\") " pod="openshift-marketplace/certified-operators-cl69z" Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.691238 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-hjkx5"] Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.691727 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0178b5af-6910-4201-99c0-0053310327a0-catalog-content\") pod \"certified-operators-cl69z\" (UID: \"0178b5af-6910-4201-99c0-0053310327a0\") " pod="openshift-marketplace/certified-operators-cl69z" Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.691913 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0178b5af-6910-4201-99c0-0053310327a0-utilities\") pod \"certified-operators-cl69z\" (UID: \"0178b5af-6910-4201-99c0-0053310327a0\") " pod="openshift-marketplace/certified-operators-cl69z" Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.693320 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hjkx5" Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.698405 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.703516 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hjkx5"] Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.716981 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h425t\" (UniqueName: \"kubernetes.io/projected/0178b5af-6910-4201-99c0-0053310327a0-kube-api-access-h425t\") pod \"certified-operators-cl69z\" (UID: \"0178b5af-6910-4201-99c0-0053310327a0\") " pod="openshift-marketplace/certified-operators-cl69z" Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.815311 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cl69z" Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.893961 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/230a73ed-8a25-4d36-aebb-47f12ad15d7d-catalog-content\") pod \"community-operators-hjkx5\" (UID: \"230a73ed-8a25-4d36-aebb-47f12ad15d7d\") " pod="openshift-marketplace/community-operators-hjkx5" Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.894503 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/230a73ed-8a25-4d36-aebb-47f12ad15d7d-utilities\") pod \"community-operators-hjkx5\" (UID: \"230a73ed-8a25-4d36-aebb-47f12ad15d7d\") " pod="openshift-marketplace/community-operators-hjkx5" Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.894564 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9f52\" (UniqueName: \"kubernetes.io/projected/230a73ed-8a25-4d36-aebb-47f12ad15d7d-kube-api-access-n9f52\") pod \"community-operators-hjkx5\" (UID: \"230a73ed-8a25-4d36-aebb-47f12ad15d7d\") " pod="openshift-marketplace/community-operators-hjkx5" Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.996281 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/230a73ed-8a25-4d36-aebb-47f12ad15d7d-catalog-content\") pod \"community-operators-hjkx5\" (UID: \"230a73ed-8a25-4d36-aebb-47f12ad15d7d\") " pod="openshift-marketplace/community-operators-hjkx5" Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.996352 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/230a73ed-8a25-4d36-aebb-47f12ad15d7d-utilities\") pod \"community-operators-hjkx5\" (UID: \"230a73ed-8a25-4d36-aebb-47f12ad15d7d\") " pod="openshift-marketplace/community-operators-hjkx5" Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.996401 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9f52\" (UniqueName: \"kubernetes.io/projected/230a73ed-8a25-4d36-aebb-47f12ad15d7d-kube-api-access-n9f52\") pod \"community-operators-hjkx5\" (UID: \"230a73ed-8a25-4d36-aebb-47f12ad15d7d\") " pod="openshift-marketplace/community-operators-hjkx5" Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.997034 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/230a73ed-8a25-4d36-aebb-47f12ad15d7d-utilities\") pod \"community-operators-hjkx5\" (UID: \"230a73ed-8a25-4d36-aebb-47f12ad15d7d\") " pod="openshift-marketplace/community-operators-hjkx5" Nov 28 13:25:03 crc kubenswrapper[4857]: I1128 13:25:03.997086 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/230a73ed-8a25-4d36-aebb-47f12ad15d7d-catalog-content\") pod \"community-operators-hjkx5\" (UID: \"230a73ed-8a25-4d36-aebb-47f12ad15d7d\") " pod="openshift-marketplace/community-operators-hjkx5" Nov 28 13:25:04 crc kubenswrapper[4857]: I1128 13:25:04.017365 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9f52\" (UniqueName: \"kubernetes.io/projected/230a73ed-8a25-4d36-aebb-47f12ad15d7d-kube-api-access-n9f52\") pod 
\"community-operators-hjkx5\" (UID: \"230a73ed-8a25-4d36-aebb-47f12ad15d7d\") " pod="openshift-marketplace/community-operators-hjkx5" Nov 28 13:25:04 crc kubenswrapper[4857]: I1128 13:25:04.268198 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cl69z"] Nov 28 13:25:04 crc kubenswrapper[4857]: W1128 13:25:04.273268 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0178b5af_6910_4201_99c0_0053310327a0.slice/crio-3ae53f4618e15aa5c13a89a4454a242a48179e8173ffabf93b5c563fc43b84d5 WatchSource:0}: Error finding container 3ae53f4618e15aa5c13a89a4454a242a48179e8173ffabf93b5c563fc43b84d5: Status 404 returned error can't find the container with id 3ae53f4618e15aa5c13a89a4454a242a48179e8173ffabf93b5c563fc43b84d5 Nov 28 13:25:04 crc kubenswrapper[4857]: I1128 13:25:04.314064 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hjkx5" Nov 28 13:25:04 crc kubenswrapper[4857]: I1128 13:25:04.503618 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s4dd7" event={"ID":"34b5fe72-977e-444a-94ab-5a135d1a3417","Type":"ContainerStarted","Data":"3e5e24cf4347787c3650b69f8a0967bb959faf6a893493abe75932fe4f1b5193"} Nov 28 13:25:04 crc kubenswrapper[4857]: I1128 13:25:04.505983 4857 generic.go:334] "Generic (PLEG): container finished" podID="87a26cb0-0ac0-44ca-8941-943e7e2bb155" containerID="3ecf33bc2211be07cb671a3ad99c4b54730362f89f3584b2b5a19be00ccbde99" exitCode=0 Nov 28 13:25:04 crc kubenswrapper[4857]: I1128 13:25:04.506030 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2pltp" event={"ID":"87a26cb0-0ac0-44ca-8941-943e7e2bb155","Type":"ContainerDied","Data":"3ecf33bc2211be07cb671a3ad99c4b54730362f89f3584b2b5a19be00ccbde99"} Nov 28 13:25:04 crc kubenswrapper[4857]: I1128 13:25:04.508842 4857 generic.go:334] "Generic (PLEG): container finished" podID="0178b5af-6910-4201-99c0-0053310327a0" containerID="c1d9a05f4919651911fddfd86357eea75037b1a73d92e098697b06ff46659074" exitCode=0 Nov 28 13:25:04 crc kubenswrapper[4857]: I1128 13:25:04.508965 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cl69z" event={"ID":"0178b5af-6910-4201-99c0-0053310327a0","Type":"ContainerDied","Data":"c1d9a05f4919651911fddfd86357eea75037b1a73d92e098697b06ff46659074"} Nov 28 13:25:04 crc kubenswrapper[4857]: I1128 13:25:04.508987 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cl69z" event={"ID":"0178b5af-6910-4201-99c0-0053310327a0","Type":"ContainerStarted","Data":"3ae53f4618e15aa5c13a89a4454a242a48179e8173ffabf93b5c563fc43b84d5"} Nov 28 13:25:04 crc kubenswrapper[4857]: I1128 13:25:04.509141 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:04 crc kubenswrapper[4857]: I1128 13:25:04.793049 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hjkx5"] Nov 28 13:25:04 crc kubenswrapper[4857]: W1128 13:25:04.817046 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod230a73ed_8a25_4d36_aebb_47f12ad15d7d.slice/crio-3a6b3fd1a83e8a1362bf898a048a2b958a094ea732651861555524931dff0f40 WatchSource:0}: Error finding container 
3a6b3fd1a83e8a1362bf898a048a2b958a094ea732651861555524931dff0f40: Status 404 returned error can't find the container with id 3a6b3fd1a83e8a1362bf898a048a2b958a094ea732651861555524931dff0f40 Nov 28 13:25:05 crc kubenswrapper[4857]: I1128 13:25:05.515212 4857 generic.go:334] "Generic (PLEG): container finished" podID="230a73ed-8a25-4d36-aebb-47f12ad15d7d" containerID="02d4e574f2a6a5b34029cbdfff8fe47986a19b50b8f25d8d00d1b74f65c4e37d" exitCode=0 Nov 28 13:25:05 crc kubenswrapper[4857]: I1128 13:25:05.515381 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hjkx5" event={"ID":"230a73ed-8a25-4d36-aebb-47f12ad15d7d","Type":"ContainerDied","Data":"02d4e574f2a6a5b34029cbdfff8fe47986a19b50b8f25d8d00d1b74f65c4e37d"} Nov 28 13:25:05 crc kubenswrapper[4857]: I1128 13:25:05.515605 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hjkx5" event={"ID":"230a73ed-8a25-4d36-aebb-47f12ad15d7d","Type":"ContainerStarted","Data":"3a6b3fd1a83e8a1362bf898a048a2b958a094ea732651861555524931dff0f40"} Nov 28 13:25:05 crc kubenswrapper[4857]: I1128 13:25:05.518106 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2pltp" event={"ID":"87a26cb0-0ac0-44ca-8941-943e7e2bb155","Type":"ContainerStarted","Data":"fca20fa82d32fd2d5fdab3eda4a039bf1e47cc7c9c694546b1b8c0e10e4071c1"} Nov 28 13:25:05 crc kubenswrapper[4857]: I1128 13:25:05.521241 4857 generic.go:334] "Generic (PLEG): container finished" podID="34b5fe72-977e-444a-94ab-5a135d1a3417" containerID="3e5e24cf4347787c3650b69f8a0967bb959faf6a893493abe75932fe4f1b5193" exitCode=0 Nov 28 13:25:05 crc kubenswrapper[4857]: I1128 13:25:05.521322 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s4dd7" event={"ID":"34b5fe72-977e-444a-94ab-5a135d1a3417","Type":"ContainerDied","Data":"3e5e24cf4347787c3650b69f8a0967bb959faf6a893493abe75932fe4f1b5193"} Nov 28 13:25:05 crc kubenswrapper[4857]: I1128 13:25:05.575386 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-2pltp" podStartSLOduration=2.103172551 podStartE2EDuration="4.575367468s" podCreationTimestamp="2025-11-28 13:25:01 +0000 UTC" firstStartedPulling="2025-11-28 13:25:02.487340881 +0000 UTC m=+394.514716048" lastFinishedPulling="2025-11-28 13:25:04.959535808 +0000 UTC m=+396.986910965" observedRunningTime="2025-11-28 13:25:05.571784239 +0000 UTC m=+397.599159406" watchObservedRunningTime="2025-11-28 13:25:05.575367468 +0000 UTC m=+397.602742645" Nov 28 13:25:06 crc kubenswrapper[4857]: I1128 13:25:06.528603 4857 generic.go:334] "Generic (PLEG): container finished" podID="0178b5af-6910-4201-99c0-0053310327a0" containerID="9e5ee69b6f7158dfed8095fff0c9351adc29c01432f79ac99b6bf12410b890b7" exitCode=0 Nov 28 13:25:06 crc kubenswrapper[4857]: I1128 13:25:06.528649 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cl69z" event={"ID":"0178b5af-6910-4201-99c0-0053310327a0","Type":"ContainerDied","Data":"9e5ee69b6f7158dfed8095fff0c9351adc29c01432f79ac99b6bf12410b890b7"} Nov 28 13:25:09 crc kubenswrapper[4857]: I1128 13:25:09.547672 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cl69z" event={"ID":"0178b5af-6910-4201-99c0-0053310327a0","Type":"ContainerStarted","Data":"8a085aa42b3718824be241f7bdffb655f43072c718718c85512f37af4a837886"} Nov 28 13:25:09 crc 
kubenswrapper[4857]: I1128 13:25:09.550032 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s4dd7" event={"ID":"34b5fe72-977e-444a-94ab-5a135d1a3417","Type":"ContainerStarted","Data":"3fe600361cf6034f46d5060e7d93575d7ae368d6f6674bd4cad8e02efae143a4"} Nov 28 13:25:09 crc kubenswrapper[4857]: I1128 13:25:09.551563 4857 generic.go:334] "Generic (PLEG): container finished" podID="230a73ed-8a25-4d36-aebb-47f12ad15d7d" containerID="8c9280bd523ff82a0a85f7c94783654ad213b489748d2c848bc13eca6de211d0" exitCode=0 Nov 28 13:25:09 crc kubenswrapper[4857]: I1128 13:25:09.551596 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hjkx5" event={"ID":"230a73ed-8a25-4d36-aebb-47f12ad15d7d","Type":"ContainerDied","Data":"8c9280bd523ff82a0a85f7c94783654ad213b489748d2c848bc13eca6de211d0"} Nov 28 13:25:09 crc kubenswrapper[4857]: I1128 13:25:09.585606 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-cl69z" podStartSLOduration=2.727272675 podStartE2EDuration="6.585587678s" podCreationTimestamp="2025-11-28 13:25:03 +0000 UTC" firstStartedPulling="2025-11-28 13:25:04.511185156 +0000 UTC m=+396.538560323" lastFinishedPulling="2025-11-28 13:25:08.369500159 +0000 UTC m=+400.396875326" observedRunningTime="2025-11-28 13:25:09.568126643 +0000 UTC m=+401.595501821" watchObservedRunningTime="2025-11-28 13:25:09.585587678 +0000 UTC m=+401.612962845" Nov 28 13:25:10 crc kubenswrapper[4857]: I1128 13:25:10.560912 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hjkx5" event={"ID":"230a73ed-8a25-4d36-aebb-47f12ad15d7d","Type":"ContainerStarted","Data":"5ba61db65a5a1dc846d94921a35a98ef63fc17feeffd85959315dd9cf1092fce"} Nov 28 13:25:10 crc kubenswrapper[4857]: I1128 13:25:10.580644 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-hjkx5" podStartSLOduration=3.016738187 podStartE2EDuration="7.580620982s" podCreationTimestamp="2025-11-28 13:25:03 +0000 UTC" firstStartedPulling="2025-11-28 13:25:05.517605935 +0000 UTC m=+397.544981102" lastFinishedPulling="2025-11-28 13:25:10.08148873 +0000 UTC m=+402.108863897" observedRunningTime="2025-11-28 13:25:10.580085877 +0000 UTC m=+402.607461094" watchObservedRunningTime="2025-11-28 13:25:10.580620982 +0000 UTC m=+402.607996139" Nov 28 13:25:10 crc kubenswrapper[4857]: I1128 13:25:10.582155 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-s4dd7" podStartSLOduration=3.830445816 podStartE2EDuration="9.582147384s" podCreationTimestamp="2025-11-28 13:25:01 +0000 UTC" firstStartedPulling="2025-11-28 13:25:02.494480519 +0000 UTC m=+394.521855706" lastFinishedPulling="2025-11-28 13:25:08.246182107 +0000 UTC m=+400.273557274" observedRunningTime="2025-11-28 13:25:09.608594037 +0000 UTC m=+401.635969214" watchObservedRunningTime="2025-11-28 13:25:10.582147384 +0000 UTC m=+402.609522541" Nov 28 13:25:11 crc kubenswrapper[4857]: I1128 13:25:11.403778 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2pltp" Nov 28 13:25:11 crc kubenswrapper[4857]: I1128 13:25:11.403858 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-2pltp" Nov 28 13:25:11 crc kubenswrapper[4857]: I1128 13:25:11.452777 4857 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2pltp" Nov 28 13:25:11 crc kubenswrapper[4857]: I1128 13:25:11.609399 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2pltp" Nov 28 13:25:11 crc kubenswrapper[4857]: I1128 13:25:11.611470 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-s4dd7" Nov 28 13:25:11 crc kubenswrapper[4857]: I1128 13:25:11.611573 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-s4dd7" Nov 28 13:25:12 crc kubenswrapper[4857]: I1128 13:25:12.664364 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-s4dd7" podUID="34b5fe72-977e-444a-94ab-5a135d1a3417" containerName="registry-server" probeResult="failure" output=< Nov 28 13:25:12 crc kubenswrapper[4857]: timeout: failed to connect service ":50051" within 1s Nov 28 13:25:12 crc kubenswrapper[4857]: > Nov 28 13:25:13 crc kubenswrapper[4857]: I1128 13:25:13.815845 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-cl69z" Nov 28 13:25:13 crc kubenswrapper[4857]: I1128 13:25:13.816113 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-cl69z" Nov 28 13:25:13 crc kubenswrapper[4857]: I1128 13:25:13.873521 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-cl69z" Nov 28 13:25:14 crc kubenswrapper[4857]: I1128 13:25:14.320239 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-hjkx5" Nov 28 13:25:14 crc kubenswrapper[4857]: I1128 13:25:14.320289 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-hjkx5" Nov 28 13:25:14 crc kubenswrapper[4857]: I1128 13:25:14.389668 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-hjkx5" Nov 28 13:25:14 crc kubenswrapper[4857]: I1128 13:25:14.626313 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-cl69z" Nov 28 13:25:14 crc kubenswrapper[4857]: I1128 13:25:14.638766 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l"] Nov 28 13:25:14 crc kubenswrapper[4857]: I1128 13:25:14.638996 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" podUID="32e9d91a-703a-4f61-bcc5-bf59f465b22c" containerName="route-controller-manager" containerID="cri-o://fb0a5b15884d6f86c5a1102bfb8950fdb16af17a40fc0c122f559b8a81c163ce" gracePeriod=30 Nov 28 13:25:16 crc kubenswrapper[4857]: I1128 13:25:16.182723 4857 patch_prober.go:28] interesting pod/route-controller-manager-5cc86f69f5-l5c4l container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.58:8443/healthz\": dial tcp 10.217.0.58:8443: connect: connection refused" start-of-body= Nov 28 13:25:16 crc kubenswrapper[4857]: I1128 13:25:16.183114 4857 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" podUID="32e9d91a-703a-4f61-bcc5-bf59f465b22c" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.58:8443/healthz\": dial tcp 10.217.0.58:8443: connect: connection refused" Nov 28 13:25:17 crc kubenswrapper[4857]: I1128 13:25:17.601165 4857 generic.go:334] "Generic (PLEG): container finished" podID="32e9d91a-703a-4f61-bcc5-bf59f465b22c" containerID="fb0a5b15884d6f86c5a1102bfb8950fdb16af17a40fc0c122f559b8a81c163ce" exitCode=0 Nov 28 13:25:17 crc kubenswrapper[4857]: I1128 13:25:17.601243 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" event={"ID":"32e9d91a-703a-4f61-bcc5-bf59f465b22c","Type":"ContainerDied","Data":"fb0a5b15884d6f86c5a1102bfb8950fdb16af17a40fc0c122f559b8a81c163ce"} Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.105576 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.131654 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-59b8f58488-wbtsk"] Nov 28 13:25:20 crc kubenswrapper[4857]: E1128 13:25:20.131957 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32e9d91a-703a-4f61-bcc5-bf59f465b22c" containerName="route-controller-manager" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.131973 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="32e9d91a-703a-4f61-bcc5-bf59f465b22c" containerName="route-controller-manager" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.132118 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="32e9d91a-703a-4f61-bcc5-bf59f465b22c" containerName="route-controller-manager" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.132535 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-59b8f58488-wbtsk" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.150204 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-59b8f58488-wbtsk"] Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.207683 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4bhzg\" (UniqueName: \"kubernetes.io/projected/32e9d91a-703a-4f61-bcc5-bf59f465b22c-kube-api-access-4bhzg\") pod \"32e9d91a-703a-4f61-bcc5-bf59f465b22c\" (UID: \"32e9d91a-703a-4f61-bcc5-bf59f465b22c\") " Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.207742 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/32e9d91a-703a-4f61-bcc5-bf59f465b22c-client-ca\") pod \"32e9d91a-703a-4f61-bcc5-bf59f465b22c\" (UID: \"32e9d91a-703a-4f61-bcc5-bf59f465b22c\") " Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.208052 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/32e9d91a-703a-4f61-bcc5-bf59f465b22c-config\") pod \"32e9d91a-703a-4f61-bcc5-bf59f465b22c\" (UID: \"32e9d91a-703a-4f61-bcc5-bf59f465b22c\") " Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.208139 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/32e9d91a-703a-4f61-bcc5-bf59f465b22c-serving-cert\") pod \"32e9d91a-703a-4f61-bcc5-bf59f465b22c\" (UID: \"32e9d91a-703a-4f61-bcc5-bf59f465b22c\") " Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.208407 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d725595-1b9e-4bb0-ad5c-4a81da40243f-config\") pod \"route-controller-manager-59b8f58488-wbtsk\" (UID: \"2d725595-1b9e-4bb0-ad5c-4a81da40243f\") " pod="openshift-route-controller-manager/route-controller-manager-59b8f58488-wbtsk" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.208454 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2d725595-1b9e-4bb0-ad5c-4a81da40243f-client-ca\") pod \"route-controller-manager-59b8f58488-wbtsk\" (UID: \"2d725595-1b9e-4bb0-ad5c-4a81da40243f\") " pod="openshift-route-controller-manager/route-controller-manager-59b8f58488-wbtsk" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.208544 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/32e9d91a-703a-4f61-bcc5-bf59f465b22c-client-ca" (OuterVolumeSpecName: "client-ca") pod "32e9d91a-703a-4f61-bcc5-bf59f465b22c" (UID: "32e9d91a-703a-4f61-bcc5-bf59f465b22c"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.208643 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/32e9d91a-703a-4f61-bcc5-bf59f465b22c-config" (OuterVolumeSpecName: "config") pod "32e9d91a-703a-4f61-bcc5-bf59f465b22c" (UID: "32e9d91a-703a-4f61-bcc5-bf59f465b22c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.208683 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wtkq\" (UniqueName: \"kubernetes.io/projected/2d725595-1b9e-4bb0-ad5c-4a81da40243f-kube-api-access-2wtkq\") pod \"route-controller-manager-59b8f58488-wbtsk\" (UID: \"2d725595-1b9e-4bb0-ad5c-4a81da40243f\") " pod="openshift-route-controller-manager/route-controller-manager-59b8f58488-wbtsk" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.208728 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2d725595-1b9e-4bb0-ad5c-4a81da40243f-serving-cert\") pod \"route-controller-manager-59b8f58488-wbtsk\" (UID: \"2d725595-1b9e-4bb0-ad5c-4a81da40243f\") " pod="openshift-route-controller-manager/route-controller-manager-59b8f58488-wbtsk" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.208852 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/32e9d91a-703a-4f61-bcc5-bf59f465b22c-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.208876 4857 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/32e9d91a-703a-4f61-bcc5-bf59f465b22c-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.214460 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32e9d91a-703a-4f61-bcc5-bf59f465b22c-kube-api-access-4bhzg" (OuterVolumeSpecName: "kube-api-access-4bhzg") pod "32e9d91a-703a-4f61-bcc5-bf59f465b22c" (UID: "32e9d91a-703a-4f61-bcc5-bf59f465b22c"). InnerVolumeSpecName "kube-api-access-4bhzg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.214619 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32e9d91a-703a-4f61-bcc5-bf59f465b22c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "32e9d91a-703a-4f61-bcc5-bf59f465b22c" (UID: "32e9d91a-703a-4f61-bcc5-bf59f465b22c"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.309293 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wtkq\" (UniqueName: \"kubernetes.io/projected/2d725595-1b9e-4bb0-ad5c-4a81da40243f-kube-api-access-2wtkq\") pod \"route-controller-manager-59b8f58488-wbtsk\" (UID: \"2d725595-1b9e-4bb0-ad5c-4a81da40243f\") " pod="openshift-route-controller-manager/route-controller-manager-59b8f58488-wbtsk" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.309348 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2d725595-1b9e-4bb0-ad5c-4a81da40243f-serving-cert\") pod \"route-controller-manager-59b8f58488-wbtsk\" (UID: \"2d725595-1b9e-4bb0-ad5c-4a81da40243f\") " pod="openshift-route-controller-manager/route-controller-manager-59b8f58488-wbtsk" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.309372 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d725595-1b9e-4bb0-ad5c-4a81da40243f-config\") pod \"route-controller-manager-59b8f58488-wbtsk\" (UID: \"2d725595-1b9e-4bb0-ad5c-4a81da40243f\") " pod="openshift-route-controller-manager/route-controller-manager-59b8f58488-wbtsk" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.309390 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2d725595-1b9e-4bb0-ad5c-4a81da40243f-client-ca\") pod \"route-controller-manager-59b8f58488-wbtsk\" (UID: \"2d725595-1b9e-4bb0-ad5c-4a81da40243f\") " pod="openshift-route-controller-manager/route-controller-manager-59b8f58488-wbtsk" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.309431 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/32e9d91a-703a-4f61-bcc5-bf59f465b22c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.309450 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4bhzg\" (UniqueName: \"kubernetes.io/projected/32e9d91a-703a-4f61-bcc5-bf59f465b22c-kube-api-access-4bhzg\") on node \"crc\" DevicePath \"\"" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.310241 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2d725595-1b9e-4bb0-ad5c-4a81da40243f-client-ca\") pod \"route-controller-manager-59b8f58488-wbtsk\" (UID: \"2d725595-1b9e-4bb0-ad5c-4a81da40243f\") " pod="openshift-route-controller-manager/route-controller-manager-59b8f58488-wbtsk" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.310986 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d725595-1b9e-4bb0-ad5c-4a81da40243f-config\") pod \"route-controller-manager-59b8f58488-wbtsk\" (UID: \"2d725595-1b9e-4bb0-ad5c-4a81da40243f\") " pod="openshift-route-controller-manager/route-controller-manager-59b8f58488-wbtsk" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.316457 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2d725595-1b9e-4bb0-ad5c-4a81da40243f-serving-cert\") pod \"route-controller-manager-59b8f58488-wbtsk\" (UID: \"2d725595-1b9e-4bb0-ad5c-4a81da40243f\") " 
pod="openshift-route-controller-manager/route-controller-manager-59b8f58488-wbtsk" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.325834 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2wtkq\" (UniqueName: \"kubernetes.io/projected/2d725595-1b9e-4bb0-ad5c-4a81da40243f-kube-api-access-2wtkq\") pod \"route-controller-manager-59b8f58488-wbtsk\" (UID: \"2d725595-1b9e-4bb0-ad5c-4a81da40243f\") " pod="openshift-route-controller-manager/route-controller-manager-59b8f58488-wbtsk" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.452561 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-59b8f58488-wbtsk" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.621226 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" event={"ID":"32e9d91a-703a-4f61-bcc5-bf59f465b22c","Type":"ContainerDied","Data":"478307892de766ce370827ed296adae5b0f4f3ef792f5deb6897d10de1251076"} Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.621275 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.621525 4857 scope.go:117] "RemoveContainer" containerID="fb0a5b15884d6f86c5a1102bfb8950fdb16af17a40fc0c122f559b8a81c163ce" Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.638289 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l"] Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.641327 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5cc86f69f5-l5c4l"] Nov 28 13:25:20 crc kubenswrapper[4857]: I1128 13:25:20.874220 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-59b8f58488-wbtsk"] Nov 28 13:25:20 crc kubenswrapper[4857]: W1128 13:25:20.880100 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2d725595_1b9e_4bb0_ad5c_4a81da40243f.slice/crio-4f6536739e1b551b1381d5f603992a2f1114f8f1695e7233626d6f6ce5ac1fd3 WatchSource:0}: Error finding container 4f6536739e1b551b1381d5f603992a2f1114f8f1695e7233626d6f6ce5ac1fd3: Status 404 returned error can't find the container with id 4f6536739e1b551b1381d5f603992a2f1114f8f1695e7233626d6f6ce5ac1fd3 Nov 28 13:25:21 crc kubenswrapper[4857]: I1128 13:25:21.633713 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-59b8f58488-wbtsk" event={"ID":"2d725595-1b9e-4bb0-ad5c-4a81da40243f","Type":"ContainerStarted","Data":"bec10012b92bb3baacb5efdbc90e97c87791c18da2d8552d7b8554ac9a5949c7"} Nov 28 13:25:21 crc kubenswrapper[4857]: I1128 13:25:21.633789 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-59b8f58488-wbtsk" event={"ID":"2d725595-1b9e-4bb0-ad5c-4a81da40243f","Type":"ContainerStarted","Data":"4f6536739e1b551b1381d5f603992a2f1114f8f1695e7233626d6f6ce5ac1fd3"} Nov 28 13:25:21 crc kubenswrapper[4857]: I1128 13:25:21.634002 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-route-controller-manager/route-controller-manager-59b8f58488-wbtsk" Nov 28 13:25:21 crc kubenswrapper[4857]: I1128 13:25:21.640440 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-59b8f58488-wbtsk" Nov 28 13:25:21 crc kubenswrapper[4857]: I1128 13:25:21.657029 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-59b8f58488-wbtsk" podStartSLOduration=7.657008168 podStartE2EDuration="7.657008168s" podCreationTimestamp="2025-11-28 13:25:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:25:21.656968257 +0000 UTC m=+413.684343424" watchObservedRunningTime="2025-11-28 13:25:21.657008168 +0000 UTC m=+413.684383345" Nov 28 13:25:21 crc kubenswrapper[4857]: I1128 13:25:21.689365 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-s4dd7" Nov 28 13:25:21 crc kubenswrapper[4857]: I1128 13:25:21.741350 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-s4dd7" Nov 28 13:25:22 crc kubenswrapper[4857]: I1128 13:25:22.012604 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-qcsnc" Nov 28 13:25:22 crc kubenswrapper[4857]: I1128 13:25:22.063533 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-t448t"] Nov 28 13:25:22 crc kubenswrapper[4857]: I1128 13:25:22.318367 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32e9d91a-703a-4f61-bcc5-bf59f465b22c" path="/var/lib/kubelet/pods/32e9d91a-703a-4f61-bcc5-bf59f465b22c/volumes" Nov 28 13:25:24 crc kubenswrapper[4857]: I1128 13:25:24.354191 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-hjkx5" Nov 28 13:25:33 crc kubenswrapper[4857]: I1128 13:25:33.177785 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:25:33 crc kubenswrapper[4857]: I1128 13:25:33.178624 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:25:33 crc kubenswrapper[4857]: I1128 13:25:33.178706 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" Nov 28 13:25:33 crc kubenswrapper[4857]: I1128 13:25:33.180430 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a683cf012ac6614e5b0b9da4523354a6e417c246529cbc1812a88525172cf275"} pod="openshift-machine-config-operator/machine-config-daemon-jdgls" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 13:25:33 crc kubenswrapper[4857]: I1128 
13:25:33.180587 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" containerID="cri-o://a683cf012ac6614e5b0b9da4523354a6e417c246529cbc1812a88525172cf275" gracePeriod=600 Nov 28 13:25:33 crc kubenswrapper[4857]: I1128 13:25:33.728447 4857 generic.go:334] "Generic (PLEG): container finished" podID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerID="a683cf012ac6614e5b0b9da4523354a6e417c246529cbc1812a88525172cf275" exitCode=0 Nov 28 13:25:33 crc kubenswrapper[4857]: I1128 13:25:33.728526 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" event={"ID":"aba2e99a-c0de-4ae5-b347-de1565fd9d68","Type":"ContainerDied","Data":"a683cf012ac6614e5b0b9da4523354a6e417c246529cbc1812a88525172cf275"} Nov 28 13:25:33 crc kubenswrapper[4857]: I1128 13:25:33.728995 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" event={"ID":"aba2e99a-c0de-4ae5-b347-de1565fd9d68","Type":"ContainerStarted","Data":"20b0d718a51ae1cc9cb720bd8e1bc59a468981664ecc595c5fc463fe7ad6b490"} Nov 28 13:25:33 crc kubenswrapper[4857]: I1128 13:25:33.729038 4857 scope.go:117] "RemoveContainer" containerID="ad405b78666fce5419e87eadbbd32a64e776f2994de3dc10493c05c902b43013" Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.171696 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-t448t" podUID="5c497e5f-f362-48a4-bf34-833bfdc6de1b" containerName="registry" containerID="cri-o://c2a04d8226585d893f88c8b94e8de04f0b48cc41315ff31eccb846ce3d93fe7a" gracePeriod=30 Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.628174 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.744265 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5c497e5f-f362-48a4-bf34-833bfdc6de1b-registry-certificates\") pod \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.744431 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.744474 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5c497e5f-f362-48a4-bf34-833bfdc6de1b-ca-trust-extracted\") pod \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.744500 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5c497e5f-f362-48a4-bf34-833bfdc6de1b-bound-sa-token\") pod \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.744571 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5c497e5f-f362-48a4-bf34-833bfdc6de1b-registry-tls\") pod \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.744596 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5c497e5f-f362-48a4-bf34-833bfdc6de1b-trusted-ca\") pod \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.744635 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f64gl\" (UniqueName: \"kubernetes.io/projected/5c497e5f-f362-48a4-bf34-833bfdc6de1b-kube-api-access-f64gl\") pod \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.744681 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5c497e5f-f362-48a4-bf34-833bfdc6de1b-installation-pull-secrets\") pod \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\" (UID: \"5c497e5f-f362-48a4-bf34-833bfdc6de1b\") " Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.745741 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c497e5f-f362-48a4-bf34-833bfdc6de1b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "5c497e5f-f362-48a4-bf34-833bfdc6de1b" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.745825 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c497e5f-f362-48a4-bf34-833bfdc6de1b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "5c497e5f-f362-48a4-bf34-833bfdc6de1b" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.752040 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c497e5f-f362-48a4-bf34-833bfdc6de1b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "5c497e5f-f362-48a4-bf34-833bfdc6de1b" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.753056 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c497e5f-f362-48a4-bf34-833bfdc6de1b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "5c497e5f-f362-48a4-bf34-833bfdc6de1b" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.758037 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c497e5f-f362-48a4-bf34-833bfdc6de1b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "5c497e5f-f362-48a4-bf34-833bfdc6de1b" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.760586 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "5c497e5f-f362-48a4-bf34-833bfdc6de1b" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.761142 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c497e5f-f362-48a4-bf34-833bfdc6de1b-kube-api-access-f64gl" (OuterVolumeSpecName: "kube-api-access-f64gl") pod "5c497e5f-f362-48a4-bf34-833bfdc6de1b" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b"). InnerVolumeSpecName "kube-api-access-f64gl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.782593 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c497e5f-f362-48a4-bf34-833bfdc6de1b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "5c497e5f-f362-48a4-bf34-833bfdc6de1b" (UID: "5c497e5f-f362-48a4-bf34-833bfdc6de1b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.825339 4857 generic.go:334] "Generic (PLEG): container finished" podID="5c497e5f-f362-48a4-bf34-833bfdc6de1b" containerID="c2a04d8226585d893f88c8b94e8de04f0b48cc41315ff31eccb846ce3d93fe7a" exitCode=0 Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.825397 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-t448t" event={"ID":"5c497e5f-f362-48a4-bf34-833bfdc6de1b","Type":"ContainerDied","Data":"c2a04d8226585d893f88c8b94e8de04f0b48cc41315ff31eccb846ce3d93fe7a"} Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.825436 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-t448t" event={"ID":"5c497e5f-f362-48a4-bf34-833bfdc6de1b","Type":"ContainerDied","Data":"6135be6f10d18f221c065ff137bf00efaf2ea1abbae947f2de9ba6964446b79b"} Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.825393 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-t448t" Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.825513 4857 scope.go:117] "RemoveContainer" containerID="c2a04d8226585d893f88c8b94e8de04f0b48cc41315ff31eccb846ce3d93fe7a" Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.840835 4857 scope.go:117] "RemoveContainer" containerID="c2a04d8226585d893f88c8b94e8de04f0b48cc41315ff31eccb846ce3d93fe7a" Nov 28 13:25:47 crc kubenswrapper[4857]: E1128 13:25:47.841248 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2a04d8226585d893f88c8b94e8de04f0b48cc41315ff31eccb846ce3d93fe7a\": container with ID starting with c2a04d8226585d893f88c8b94e8de04f0b48cc41315ff31eccb846ce3d93fe7a not found: ID does not exist" containerID="c2a04d8226585d893f88c8b94e8de04f0b48cc41315ff31eccb846ce3d93fe7a" Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.841278 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2a04d8226585d893f88c8b94e8de04f0b48cc41315ff31eccb846ce3d93fe7a"} err="failed to get container status \"c2a04d8226585d893f88c8b94e8de04f0b48cc41315ff31eccb846ce3d93fe7a\": rpc error: code = NotFound desc = could not find container \"c2a04d8226585d893f88c8b94e8de04f0b48cc41315ff31eccb846ce3d93fe7a\": container with ID starting with c2a04d8226585d893f88c8b94e8de04f0b48cc41315ff31eccb846ce3d93fe7a not found: ID does not exist" Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.845606 4857 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5c497e5f-f362-48a4-bf34-833bfdc6de1b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.845636 4857 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5c497e5f-f362-48a4-bf34-833bfdc6de1b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.845677 4857 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5c497e5f-f362-48a4-bf34-833bfdc6de1b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.845687 4857 reconciler_common.go:293] "Volume detached for volume 
\"registry-tls\" (UniqueName: \"kubernetes.io/projected/5c497e5f-f362-48a4-bf34-833bfdc6de1b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.845698 4857 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5c497e5f-f362-48a4-bf34-833bfdc6de1b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.845706 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f64gl\" (UniqueName: \"kubernetes.io/projected/5c497e5f-f362-48a4-bf34-833bfdc6de1b-kube-api-access-f64gl\") on node \"crc\" DevicePath \"\"" Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.845717 4857 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5c497e5f-f362-48a4-bf34-833bfdc6de1b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.854895 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-t448t"] Nov 28 13:25:47 crc kubenswrapper[4857]: I1128 13:25:47.859629 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-t448t"] Nov 28 13:25:48 crc kubenswrapper[4857]: I1128 13:25:48.321681 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c497e5f-f362-48a4-bf34-833bfdc6de1b" path="/var/lib/kubelet/pods/5c497e5f-f362-48a4-bf34-833bfdc6de1b/volumes" Nov 28 13:27:33 crc kubenswrapper[4857]: I1128 13:27:33.177792 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:27:33 crc kubenswrapper[4857]: I1128 13:27:33.178786 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:27:45 crc kubenswrapper[4857]: E1128 13:27:45.191732 4857 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/NetworkManager-dispatcher.service\": RecentStats: unable to find data in memory cache]" Nov 28 13:28:03 crc kubenswrapper[4857]: I1128 13:28:03.177960 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:28:03 crc kubenswrapper[4857]: I1128 13:28:03.178450 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:28:33 crc kubenswrapper[4857]: I1128 13:28:33.177907 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:28:33 crc kubenswrapper[4857]: I1128 13:28:33.178495 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:28:33 crc kubenswrapper[4857]: I1128 13:28:33.178557 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" Nov 28 13:28:33 crc kubenswrapper[4857]: I1128 13:28:33.179348 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"20b0d718a51ae1cc9cb720bd8e1bc59a468981664ecc595c5fc463fe7ad6b490"} pod="openshift-machine-config-operator/machine-config-daemon-jdgls" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 13:28:33 crc kubenswrapper[4857]: I1128 13:28:33.179450 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" containerID="cri-o://20b0d718a51ae1cc9cb720bd8e1bc59a468981664ecc595c5fc463fe7ad6b490" gracePeriod=600 Nov 28 13:28:33 crc kubenswrapper[4857]: I1128 13:28:33.919146 4857 generic.go:334] "Generic (PLEG): container finished" podID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerID="20b0d718a51ae1cc9cb720bd8e1bc59a468981664ecc595c5fc463fe7ad6b490" exitCode=0 Nov 28 13:28:33 crc kubenswrapper[4857]: I1128 13:28:33.919202 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" event={"ID":"aba2e99a-c0de-4ae5-b347-de1565fd9d68","Type":"ContainerDied","Data":"20b0d718a51ae1cc9cb720bd8e1bc59a468981664ecc595c5fc463fe7ad6b490"} Nov 28 13:28:33 crc kubenswrapper[4857]: I1128 13:28:33.919731 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" event={"ID":"aba2e99a-c0de-4ae5-b347-de1565fd9d68","Type":"ContainerStarted","Data":"a901816410aec0f9ca863ce99f942562aa81364027ff9914d88a03e73e8bd981"} Nov 28 13:28:33 crc kubenswrapper[4857]: I1128 13:28:33.919773 4857 scope.go:117] "RemoveContainer" containerID="a683cf012ac6614e5b0b9da4523354a6e417c246529cbc1812a88525172cf275" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.201800 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405610-4jrrp"] Nov 28 13:30:00 crc kubenswrapper[4857]: E1128 13:30:00.202546 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c497e5f-f362-48a4-bf34-833bfdc6de1b" containerName="registry" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.202561 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c497e5f-f362-48a4-bf34-833bfdc6de1b" containerName="registry" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.202676 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c497e5f-f362-48a4-bf34-833bfdc6de1b" containerName="registry" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 
13:30:00.203130 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-4jrrp" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.205588 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.206106 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.213582 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405610-4jrrp"] Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.251152 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f753bc8a-8e44-4eba-89aa-26ce1a09831a-secret-volume\") pod \"collect-profiles-29405610-4jrrp\" (UID: \"f753bc8a-8e44-4eba-89aa-26ce1a09831a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-4jrrp" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.251527 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f753bc8a-8e44-4eba-89aa-26ce1a09831a-config-volume\") pod \"collect-profiles-29405610-4jrrp\" (UID: \"f753bc8a-8e44-4eba-89aa-26ce1a09831a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-4jrrp" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.251779 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dg522\" (UniqueName: \"kubernetes.io/projected/f753bc8a-8e44-4eba-89aa-26ce1a09831a-kube-api-access-dg522\") pod \"collect-profiles-29405610-4jrrp\" (UID: \"f753bc8a-8e44-4eba-89aa-26ce1a09831a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-4jrrp" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.353563 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f753bc8a-8e44-4eba-89aa-26ce1a09831a-config-volume\") pod \"collect-profiles-29405610-4jrrp\" (UID: \"f753bc8a-8e44-4eba-89aa-26ce1a09831a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-4jrrp" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.353873 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dg522\" (UniqueName: \"kubernetes.io/projected/f753bc8a-8e44-4eba-89aa-26ce1a09831a-kube-api-access-dg522\") pod \"collect-profiles-29405610-4jrrp\" (UID: \"f753bc8a-8e44-4eba-89aa-26ce1a09831a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-4jrrp" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.353942 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f753bc8a-8e44-4eba-89aa-26ce1a09831a-secret-volume\") pod \"collect-profiles-29405610-4jrrp\" (UID: \"f753bc8a-8e44-4eba-89aa-26ce1a09831a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-4jrrp" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.356197 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/f753bc8a-8e44-4eba-89aa-26ce1a09831a-config-volume\") pod \"collect-profiles-29405610-4jrrp\" (UID: \"f753bc8a-8e44-4eba-89aa-26ce1a09831a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-4jrrp" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.360836 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f753bc8a-8e44-4eba-89aa-26ce1a09831a-secret-volume\") pod \"collect-profiles-29405610-4jrrp\" (UID: \"f753bc8a-8e44-4eba-89aa-26ce1a09831a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-4jrrp" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.390849 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dg522\" (UniqueName: \"kubernetes.io/projected/f753bc8a-8e44-4eba-89aa-26ce1a09831a-kube-api-access-dg522\") pod \"collect-profiles-29405610-4jrrp\" (UID: \"f753bc8a-8e44-4eba-89aa-26ce1a09831a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-4jrrp" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.524437 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-4jrrp" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.722023 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405610-4jrrp"] Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.505451 4857 generic.go:334] "Generic (PLEG): container finished" podID="f753bc8a-8e44-4eba-89aa-26ce1a09831a" containerID="bb98cf60649531cab7deb85cb1ac365181360254079c82753e23a36dcc57d31a" exitCode=0 Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.505513 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-4jrrp" event={"ID":"f753bc8a-8e44-4eba-89aa-26ce1a09831a","Type":"ContainerDied","Data":"bb98cf60649531cab7deb85cb1ac365181360254079c82753e23a36dcc57d31a"} Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.505783 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-4jrrp" event={"ID":"f753bc8a-8e44-4eba-89aa-26ce1a09831a","Type":"ContainerStarted","Data":"c919906aa9f73e4c78369295e9e6d4b03c6f9ef243bb57e3902c0c3f6995d4ec"} Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.761069 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-4jrrp" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.781435 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f753bc8a-8e44-4eba-89aa-26ce1a09831a-secret-volume\") pod \"f753bc8a-8e44-4eba-89aa-26ce1a09831a\" (UID: \"f753bc8a-8e44-4eba-89aa-26ce1a09831a\") " Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.781890 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f753bc8a-8e44-4eba-89aa-26ce1a09831a-config-volume\") pod \"f753bc8a-8e44-4eba-89aa-26ce1a09831a\" (UID: \"f753bc8a-8e44-4eba-89aa-26ce1a09831a\") " Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.782077 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dg522\" (UniqueName: \"kubernetes.io/projected/f753bc8a-8e44-4eba-89aa-26ce1a09831a-kube-api-access-dg522\") pod \"f753bc8a-8e44-4eba-89aa-26ce1a09831a\" (UID: \"f753bc8a-8e44-4eba-89aa-26ce1a09831a\") " Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.783333 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f753bc8a-8e44-4eba-89aa-26ce1a09831a-config-volume" (OuterVolumeSpecName: "config-volume") pod "f753bc8a-8e44-4eba-89aa-26ce1a09831a" (UID: "f753bc8a-8e44-4eba-89aa-26ce1a09831a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.788908 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f753bc8a-8e44-4eba-89aa-26ce1a09831a-kube-api-access-dg522" (OuterVolumeSpecName: "kube-api-access-dg522") pod "f753bc8a-8e44-4eba-89aa-26ce1a09831a" (UID: "f753bc8a-8e44-4eba-89aa-26ce1a09831a"). InnerVolumeSpecName "kube-api-access-dg522". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.790880 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f753bc8a-8e44-4eba-89aa-26ce1a09831a-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f753bc8a-8e44-4eba-89aa-26ce1a09831a" (UID: "f753bc8a-8e44-4eba-89aa-26ce1a09831a"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.884380 4857 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f753bc8a-8e44-4eba-89aa-26ce1a09831a-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.884445 4857 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f753bc8a-8e44-4eba-89aa-26ce1a09831a-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.884471 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dg522\" (UniqueName: \"kubernetes.io/projected/f753bc8a-8e44-4eba-89aa-26ce1a09831a-kube-api-access-dg522\") on node \"crc\" DevicePath \"\"" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.519796 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-4jrrp" event={"ID":"f753bc8a-8e44-4eba-89aa-26ce1a09831a","Type":"ContainerDied","Data":"c919906aa9f73e4c78369295e9e6d4b03c6f9ef243bb57e3902c0c3f6995d4ec"} Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.519847 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c919906aa9f73e4c78369295e9e6d4b03c6f9ef243bb57e3902c0c3f6995d4ec" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.520221 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-4jrrp" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.177807 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.178262 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.091962 4857 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.178633 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.178744 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.178085 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe 
status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.179087 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.179167 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.180142 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a901816410aec0f9ca863ce99f942562aa81364027ff9914d88a03e73e8bd981"} pod="openshift-machine-config-operator/machine-config-daemon-jdgls" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.180216 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" containerID="cri-o://a901816410aec0f9ca863ce99f942562aa81364027ff9914d88a03e73e8bd981" gracePeriod=600 Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.114320 4857 generic.go:334] "Generic (PLEG): container finished" podID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerID="a901816410aec0f9ca863ce99f942562aa81364027ff9914d88a03e73e8bd981" exitCode=0 Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.114392 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" event={"ID":"aba2e99a-c0de-4ae5-b347-de1565fd9d68","Type":"ContainerDied","Data":"a901816410aec0f9ca863ce99f942562aa81364027ff9914d88a03e73e8bd981"} Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.114733 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" event={"ID":"aba2e99a-c0de-4ae5-b347-de1565fd9d68","Type":"ContainerStarted","Data":"2e8dd17747c47de8cbeb5abc3c3cfa11211aa6c3f675d9205fe31b2543798131"} Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.114801 4857 scope.go:117] "RemoveContainer" containerID="20b0d718a51ae1cc9cb720bd8e1bc59a468981664ecc595c5fc463fe7ad6b490" Nov 28 13:32:12 crc kubenswrapper[4857]: I1128 13:32:12.815030 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-w25ss"] Nov 28 13:32:12 crc kubenswrapper[4857]: I1128 13:32:12.816104 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="ovn-controller" containerID="cri-o://0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c" gracePeriod=30 Nov 28 13:32:12 crc kubenswrapper[4857]: I1128 13:32:12.816186 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="northd" containerID="cri-o://3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e" 
gracePeriod=30 Nov 28 13:32:12 crc kubenswrapper[4857]: I1128 13:32:12.816231 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee" gracePeriod=30 Nov 28 13:32:12 crc kubenswrapper[4857]: I1128 13:32:12.816199 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="sbdb" containerID="cri-o://29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1" gracePeriod=30 Nov 28 13:32:12 crc kubenswrapper[4857]: I1128 13:32:12.816285 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="kube-rbac-proxy-node" containerID="cri-o://ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439" gracePeriod=30 Nov 28 13:32:12 crc kubenswrapper[4857]: I1128 13:32:12.816326 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="ovn-acl-logging" containerID="cri-o://1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01" gracePeriod=30 Nov 28 13:32:12 crc kubenswrapper[4857]: I1128 13:32:12.816338 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="nbdb" containerID="cri-o://1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6" gracePeriod=30 Nov 28 13:32:12 crc kubenswrapper[4857]: I1128 13:32:12.850562 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="ovnkube-controller" containerID="cri-o://f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99" gracePeriod=30 Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.099483 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w25ss_bf74e995-2208-43c6-b89d-10318f55cda8/ovnkube-controller/3.log" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.101828 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w25ss_bf74e995-2208-43c6-b89d-10318f55cda8/ovn-acl-logging/0.log" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.102372 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w25ss_bf74e995-2208-43c6-b89d-10318f55cda8/ovn-controller/0.log" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.102968 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133064 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-run-ovn\") pod \"bf74e995-2208-43c6-b89d-10318f55cda8\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133107 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-var-lib-openvswitch\") pod \"bf74e995-2208-43c6-b89d-10318f55cda8\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133139 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-slash\") pod \"bf74e995-2208-43c6-b89d-10318f55cda8\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133176 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-run-openvswitch\") pod \"bf74e995-2208-43c6-b89d-10318f55cda8\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133204 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-kubelet\") pod \"bf74e995-2208-43c6-b89d-10318f55cda8\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133218 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-run-ovn-kubernetes\") pod \"bf74e995-2208-43c6-b89d-10318f55cda8\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133230 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-run-netns\") pod \"bf74e995-2208-43c6-b89d-10318f55cda8\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133251 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7nc22\" (UniqueName: \"kubernetes.io/projected/bf74e995-2208-43c6-b89d-10318f55cda8-kube-api-access-7nc22\") pod \"bf74e995-2208-43c6-b89d-10318f55cda8\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133264 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-etc-openvswitch\") pod \"bf74e995-2208-43c6-b89d-10318f55cda8\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133281 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-run-systemd\") pod 
\"bf74e995-2208-43c6-b89d-10318f55cda8\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133313 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-var-lib-cni-networks-ovn-kubernetes\") pod \"bf74e995-2208-43c6-b89d-10318f55cda8\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133337 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/bf74e995-2208-43c6-b89d-10318f55cda8-ovnkube-script-lib\") pod \"bf74e995-2208-43c6-b89d-10318f55cda8\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133362 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "bf74e995-2208-43c6-b89d-10318f55cda8" (UID: "bf74e995-2208-43c6-b89d-10318f55cda8"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133385 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-node-log\") pod \"bf74e995-2208-43c6-b89d-10318f55cda8\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133425 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-node-log" (OuterVolumeSpecName: "node-log") pod "bf74e995-2208-43c6-b89d-10318f55cda8" (UID: "bf74e995-2208-43c6-b89d-10318f55cda8"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133452 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "bf74e995-2208-43c6-b89d-10318f55cda8" (UID: "bf74e995-2208-43c6-b89d-10318f55cda8"). InnerVolumeSpecName "host-run-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133457 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-log-socket\") pod \"bf74e995-2208-43c6-b89d-10318f55cda8\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133488 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-cni-netd\") pod \"bf74e995-2208-43c6-b89d-10318f55cda8\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133506 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "bf74e995-2208-43c6-b89d-10318f55cda8" (UID: "bf74e995-2208-43c6-b89d-10318f55cda8"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133531 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/bf74e995-2208-43c6-b89d-10318f55cda8-ovn-node-metrics-cert\") pod \"bf74e995-2208-43c6-b89d-10318f55cda8\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133557 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-systemd-units\") pod \"bf74e995-2208-43c6-b89d-10318f55cda8\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133564 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "bf74e995-2208-43c6-b89d-10318f55cda8" (UID: "bf74e995-2208-43c6-b89d-10318f55cda8"). InnerVolumeSpecName "host-run-netns". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133581 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/bf74e995-2208-43c6-b89d-10318f55cda8-ovnkube-config\") pod \"bf74e995-2208-43c6-b89d-10318f55cda8\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.134463 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-cni-bin\") pod \"bf74e995-2208-43c6-b89d-10318f55cda8\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.134490 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/bf74e995-2208-43c6-b89d-10318f55cda8-env-overrides\") pod \"bf74e995-2208-43c6-b89d-10318f55cda8\" (UID: \"bf74e995-2208-43c6-b89d-10318f55cda8\") " Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133616 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "bf74e995-2208-43c6-b89d-10318f55cda8" (UID: "bf74e995-2208-43c6-b89d-10318f55cda8"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133634 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-log-socket" (OuterVolumeSpecName: "log-socket") pod "bf74e995-2208-43c6-b89d-10318f55cda8" (UID: "bf74e995-2208-43c6-b89d-10318f55cda8"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133644 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "bf74e995-2208-43c6-b89d-10318f55cda8" (UID: "bf74e995-2208-43c6-b89d-10318f55cda8"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133656 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-slash" (OuterVolumeSpecName: "host-slash") pod "bf74e995-2208-43c6-b89d-10318f55cda8" (UID: "bf74e995-2208-43c6-b89d-10318f55cda8"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133686 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "bf74e995-2208-43c6-b89d-10318f55cda8" (UID: "bf74e995-2208-43c6-b89d-10318f55cda8"). InnerVolumeSpecName "run-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133685 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "bf74e995-2208-43c6-b89d-10318f55cda8" (UID: "bf74e995-2208-43c6-b89d-10318f55cda8"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.133925 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "bf74e995-2208-43c6-b89d-10318f55cda8" (UID: "bf74e995-2208-43c6-b89d-10318f55cda8"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.134254 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "bf74e995-2208-43c6-b89d-10318f55cda8" (UID: "bf74e995-2208-43c6-b89d-10318f55cda8"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.134373 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf74e995-2208-43c6-b89d-10318f55cda8-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "bf74e995-2208-43c6-b89d-10318f55cda8" (UID: "bf74e995-2208-43c6-b89d-10318f55cda8"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.134598 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "bf74e995-2208-43c6-b89d-10318f55cda8" (UID: "bf74e995-2208-43c6-b89d-10318f55cda8"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.134521 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf74e995-2208-43c6-b89d-10318f55cda8-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "bf74e995-2208-43c6-b89d-10318f55cda8" (UID: "bf74e995-2208-43c6-b89d-10318f55cda8"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.134919 4857 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-node-log\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.134931 4857 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-log-socket\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.134940 4857 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.134949 4857 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.134958 4857 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/bf74e995-2208-43c6-b89d-10318f55cda8-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.134965 4857 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.134973 4857 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.134981 4857 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.134988 4857 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-slash\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.134997 4857 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.135005 4857 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.135013 4857 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.135020 4857 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:13 crc 
kubenswrapper[4857]: I1128 13:32:13.135027 4857 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.135035 4857 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.135042 4857 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/bf74e995-2208-43c6-b89d-10318f55cda8-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.134956 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf74e995-2208-43c6-b89d-10318f55cda8-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "bf74e995-2208-43c6-b89d-10318f55cda8" (UID: "bf74e995-2208-43c6-b89d-10318f55cda8"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.140023 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf74e995-2208-43c6-b89d-10318f55cda8-kube-api-access-7nc22" (OuterVolumeSpecName: "kube-api-access-7nc22") pod "bf74e995-2208-43c6-b89d-10318f55cda8" (UID: "bf74e995-2208-43c6-b89d-10318f55cda8"). InnerVolumeSpecName "kube-api-access-7nc22". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.141714 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf74e995-2208-43c6-b89d-10318f55cda8-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "bf74e995-2208-43c6-b89d-10318f55cda8" (UID: "bf74e995-2208-43c6-b89d-10318f55cda8"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.154816 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "bf74e995-2208-43c6-b89d-10318f55cda8" (UID: "bf74e995-2208-43c6-b89d-10318f55cda8"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.160669 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-nn575"] Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.161104 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="ovnkube-controller" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161149 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="ovnkube-controller" Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.161168 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="ovnkube-controller" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161181 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="ovnkube-controller" Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.161196 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="ovn-controller" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161209 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="ovn-controller" Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.161226 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="northd" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161238 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="northd" Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.161256 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="kubecfg-setup" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161267 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="kubecfg-setup" Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.161287 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="sbdb" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161301 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="sbdb" Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.161323 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="nbdb" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161338 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="nbdb" Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.161369 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="kube-rbac-proxy-ovn-metrics" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161382 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="kube-rbac-proxy-ovn-metrics" Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.161398 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" 
containerName="ovnkube-controller" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161410 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="ovnkube-controller" Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.161426 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="kube-rbac-proxy-node" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161437 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="kube-rbac-proxy-node" Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.161453 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f753bc8a-8e44-4eba-89aa-26ce1a09831a" containerName="collect-profiles" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161464 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f753bc8a-8e44-4eba-89aa-26ce1a09831a" containerName="collect-profiles" Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.161482 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="ovn-acl-logging" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161494 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="ovn-acl-logging" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161614 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="kube-rbac-proxy-ovn-metrics" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161625 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="ovnkube-controller" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161636 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="ovnkube-controller" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161648 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="ovnkube-controller" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161657 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f753bc8a-8e44-4eba-89aa-26ce1a09831a" containerName="collect-profiles" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161668 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="kube-rbac-proxy-node" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161682 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="sbdb" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161689 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="ovn-acl-logging" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161699 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="ovn-controller" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161708 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="ovnkube-controller" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161720 4857 
memory_manager.go:354] "RemoveStaleState removing state" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="nbdb" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161733 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="northd" Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.161863 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="ovnkube-controller" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161873 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="ovnkube-controller" Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.161883 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="ovnkube-controller" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.161891 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="ovnkube-controller" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.162012 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" containerName="ovnkube-controller" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.165569 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-nn575" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.236690 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-systemd-units\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.236783 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-host-kubelet\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.236813 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-host-cni-netd\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.236840 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-var-lib-openvswitch\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.236872 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-host-run-ovn-kubernetes\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575" 
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.236980 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-run-openvswitch\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.237037 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.237081 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-host-slash\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.237162 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-etc-openvswitch\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.237197 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-log-socket\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.237250 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-run-ovn\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.237280 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-node-log\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.237379 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-host-cni-bin\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.237426 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlczn\" (UniqueName: \"kubernetes.io/projected/7c3758fa-bb39-4a7f-878f-7487d2b24513-kube-api-access-nlczn\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.237448 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-run-systemd\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.237478 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-host-run-netns\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.237498 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7c3758fa-bb39-4a7f-878f-7487d2b24513-ovnkube-config\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.237540 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7c3758fa-bb39-4a7f-878f-7487d2b24513-ovnkube-script-lib\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.237572 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7c3758fa-bb39-4a7f-878f-7487d2b24513-ovn-node-metrics-cert\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.237600 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7c3758fa-bb39-4a7f-878f-7487d2b24513-env-overrides\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.237643 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7nc22\" (UniqueName: \"kubernetes.io/projected/bf74e995-2208-43c6-b89d-10318f55cda8-kube-api-access-7nc22\") on node \"crc\" DevicePath \"\""
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.237656 4857 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/bf74e995-2208-43c6-b89d-10318f55cda8-run-systemd\") on node \"crc\" DevicePath \"\""
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.237666 4857 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/bf74e995-2208-43c6-b89d-10318f55cda8-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\""
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.237675 4857 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/bf74e995-2208-43c6-b89d-10318f55cda8-env-overrides\") on node \"crc\" DevicePath \"\""
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.338413 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-host-slash\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.338493 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-etc-openvswitch\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.338526 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-log-socket\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.338556 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-host-slash\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.338614 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-run-ovn\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.338622 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-log-socket\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.338569 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-run-ovn\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.338705 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-etc-openvswitch\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.338894 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-node-log\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.338965 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-node-log\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339017 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-host-cni-bin\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339071 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlczn\" (UniqueName: \"kubernetes.io/projected/7c3758fa-bb39-4a7f-878f-7487d2b24513-kube-api-access-nlczn\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339084 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-host-cni-bin\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339112 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-run-systemd\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339158 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-host-run-netns\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339196 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7c3758fa-bb39-4a7f-878f-7487d2b24513-ovnkube-config\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339254 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7c3758fa-bb39-4a7f-878f-7487d2b24513-ovnkube-script-lib\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339302 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-host-run-netns\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339334 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7c3758fa-bb39-4a7f-878f-7487d2b24513-ovn-node-metrics-cert\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339385 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7c3758fa-bb39-4a7f-878f-7487d2b24513-env-overrides\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339427 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-systemd-units\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339489 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-host-kubelet\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339519 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-host-cni-netd\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339556 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-systemd-units\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339570 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-var-lib-openvswitch\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339602 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-host-kubelet\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339611 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-host-run-ovn-kubernetes\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339642 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-host-cni-netd\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339650 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-run-openvswitch\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339704 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-host-run-ovn-kubernetes\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339680 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-var-lib-openvswitch\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339739 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-run-openvswitch\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339717 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339829 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.341085 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7c3758fa-bb39-4a7f-878f-7487d2b24513-env-overrides\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.341266 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7c3758fa-bb39-4a7f-878f-7487d2b24513-ovnkube-config\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.339260 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7c3758fa-bb39-4a7f-878f-7487d2b24513-run-systemd\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.342378 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7c3758fa-bb39-4a7f-878f-7487d2b24513-ovnkube-script-lib\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.345257 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7c3758fa-bb39-4a7f-878f-7487d2b24513-ovn-node-metrics-cert\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.364389 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlczn\" (UniqueName: \"kubernetes.io/projected/7c3758fa-bb39-4a7f-878f-7487d2b24513-kube-api-access-nlczn\") pod \"ovnkube-node-nn575\" (UID: \"7c3758fa-bb39-4a7f-878f-7487d2b24513\") " pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.378336 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-tzg2g_1031bdc4-d6c6-4425-805b-506069f5667d/kube-multus/2.log"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.379174 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-tzg2g_1031bdc4-d6c6-4425-805b-506069f5667d/kube-multus/1.log"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.379235 4857 generic.go:334] "Generic (PLEG): container finished" podID="1031bdc4-d6c6-4425-805b-506069f5667d" containerID="255dccf9694a6567337261a11825e45d80269685bac4522118e6cb077d34971e" exitCode=2
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.379328 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-tzg2g" event={"ID":"1031bdc4-d6c6-4425-805b-506069f5667d","Type":"ContainerDied","Data":"255dccf9694a6567337261a11825e45d80269685bac4522118e6cb077d34971e"}
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.379413 4857 scope.go:117] "RemoveContainer" containerID="81125daca139d6b77545a7ffee9064cd2fd693de61ae093e889ec72440be4856"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.381295 4857 scope.go:117] "RemoveContainer" containerID="255dccf9694a6567337261a11825e45d80269685bac4522118e6cb077d34971e"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.383691 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w25ss_bf74e995-2208-43c6-b89d-10318f55cda8/ovnkube-controller/3.log"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.388372 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w25ss_bf74e995-2208-43c6-b89d-10318f55cda8/ovn-acl-logging/0.log"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.389454 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-w25ss_bf74e995-2208-43c6-b89d-10318f55cda8/ovn-controller/0.log"
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390471 4857 generic.go:334] "Generic (PLEG): container finished" podID="bf74e995-2208-43c6-b89d-10318f55cda8" containerID="f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99" exitCode=0
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390496 4857 generic.go:334] "Generic (PLEG): container finished" podID="bf74e995-2208-43c6-b89d-10318f55cda8" containerID="29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1" exitCode=0
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390505 4857 generic.go:334] "Generic (PLEG): container finished" podID="bf74e995-2208-43c6-b89d-10318f55cda8" containerID="1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6" exitCode=0
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390512 4857 generic.go:334] "Generic (PLEG): container finished" podID="bf74e995-2208-43c6-b89d-10318f55cda8" containerID="3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e" exitCode=0
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390521 4857 generic.go:334] "Generic (PLEG): container finished" podID="bf74e995-2208-43c6-b89d-10318f55cda8" containerID="5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee" exitCode=0
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390530 4857 generic.go:334] "Generic (PLEG): container finished" podID="bf74e995-2208-43c6-b89d-10318f55cda8" containerID="ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439" exitCode=0
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390537 4857 generic.go:334] "Generic (PLEG): container finished" podID="bf74e995-2208-43c6-b89d-10318f55cda8" containerID="1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01" exitCode=143
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390544 4857 generic.go:334] "Generic (PLEG): container finished" podID="bf74e995-2208-43c6-b89d-10318f55cda8" containerID="0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c" exitCode=143
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390563 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerDied","Data":"f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99"}
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390586 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerDied","Data":"29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1"}
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390597 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerDied","Data":"1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6"}
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390607 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerDied","Data":"3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e"}
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390615 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerDied","Data":"5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee"}
Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390624 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss"
event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerDied","Data":"ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390634 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390644 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390649 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390654 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390658 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390663 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390668 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390673 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390677 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390682 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390688 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerDied","Data":"1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390695 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390701 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390706 4857 pod_container_deletor.go:114] 
"Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390712 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390716 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390721 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390726 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390733 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390739 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390744 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390764 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerDied","Data":"0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390772 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390778 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390783 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390788 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390792 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390798 4857 pod_container_deletor.go:114] 
"Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390803 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390808 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390814 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390819 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390826 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" event={"ID":"bf74e995-2208-43c6-b89d-10318f55cda8","Type":"ContainerDied","Data":"1f6a0725a20305bed641ca83f1d683236e70ef76e05daa6e5c7042edffd2ae57"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390834 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390840 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390845 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390850 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390856 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390861 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390866 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390871 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390876 4857 pod_container_deletor.go:114] 
"Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390882 4857 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809"} Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.390994 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-w25ss" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.425967 4857 scope.go:117] "RemoveContainer" containerID="f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.436533 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-w25ss"] Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.439976 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-w25ss"] Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.450391 4857 scope.go:117] "RemoveContainer" containerID="4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.478164 4857 scope.go:117] "RemoveContainer" containerID="29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.478310 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-nn575" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.495237 4857 scope.go:117] "RemoveContainer" containerID="1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6" Nov 28 13:32:13 crc kubenswrapper[4857]: W1128 13:32:13.501198 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7c3758fa_bb39_4a7f_878f_7487d2b24513.slice/crio-672c2ebc319c7ee98f5b44fa212d2e08d64c8fa213485067bc15a1636bbab200 WatchSource:0}: Error finding container 672c2ebc319c7ee98f5b44fa212d2e08d64c8fa213485067bc15a1636bbab200: Status 404 returned error can't find the container with id 672c2ebc319c7ee98f5b44fa212d2e08d64c8fa213485067bc15a1636bbab200 Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.508711 4857 scope.go:117] "RemoveContainer" containerID="3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.527878 4857 scope.go:117] "RemoveContainer" containerID="5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.551059 4857 scope.go:117] "RemoveContainer" containerID="ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.569178 4857 scope.go:117] "RemoveContainer" containerID="1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.591279 4857 scope.go:117] "RemoveContainer" containerID="0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.607391 4857 scope.go:117] "RemoveContainer" containerID="6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.623260 4857 scope.go:117] "RemoveContainer" 
containerID="f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99" Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.623815 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99\": container with ID starting with f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99 not found: ID does not exist" containerID="f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.623865 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99"} err="failed to get container status \"f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99\": rpc error: code = NotFound desc = could not find container \"f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99\": container with ID starting with f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99 not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.623892 4857 scope.go:117] "RemoveContainer" containerID="4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f" Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.624323 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f\": container with ID starting with 4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f not found: ID does not exist" containerID="4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.624374 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f"} err="failed to get container status \"4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f\": rpc error: code = NotFound desc = could not find container \"4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f\": container with ID starting with 4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.624406 4857 scope.go:117] "RemoveContainer" containerID="29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1" Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.624763 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\": container with ID starting with 29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1 not found: ID does not exist" containerID="29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.624791 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1"} err="failed to get container status \"29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\": rpc error: code = NotFound desc = could not find container \"29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\": container with ID starting with 
29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1 not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.624809 4857 scope.go:117] "RemoveContainer" containerID="1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6" Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.626451 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\": container with ID starting with 1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6 not found: ID does not exist" containerID="1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.626492 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6"} err="failed to get container status \"1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\": rpc error: code = NotFound desc = could not find container \"1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\": container with ID starting with 1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6 not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.626522 4857 scope.go:117] "RemoveContainer" containerID="3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e" Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.626989 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\": container with ID starting with 3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e not found: ID does not exist" containerID="3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.627015 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e"} err="failed to get container status \"3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\": rpc error: code = NotFound desc = could not find container \"3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\": container with ID starting with 3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.627032 4857 scope.go:117] "RemoveContainer" containerID="5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee" Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.627403 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\": container with ID starting with 5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee not found: ID does not exist" containerID="5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.627433 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee"} err="failed to get container status \"5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\": rpc 
error: code = NotFound desc = could not find container \"5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\": container with ID starting with 5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.627450 4857 scope.go:117] "RemoveContainer" containerID="ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439" Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.627790 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\": container with ID starting with ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439 not found: ID does not exist" containerID="ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.627961 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439"} err="failed to get container status \"ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\": rpc error: code = NotFound desc = could not find container \"ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\": container with ID starting with ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439 not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.627980 4857 scope.go:117] "RemoveContainer" containerID="1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01" Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.628377 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\": container with ID starting with 1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01 not found: ID does not exist" containerID="1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.628396 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01"} err="failed to get container status \"1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\": rpc error: code = NotFound desc = could not find container \"1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\": container with ID starting with 1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01 not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.628409 4857 scope.go:117] "RemoveContainer" containerID="0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c" Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.628713 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\": container with ID starting with 0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c not found: ID does not exist" containerID="0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.628732 4857 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c"} err="failed to get container status \"0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\": rpc error: code = NotFound desc = could not find container \"0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\": container with ID starting with 0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.628764 4857 scope.go:117] "RemoveContainer" containerID="6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809" Nov 28 13:32:13 crc kubenswrapper[4857]: E1128 13:32:13.630304 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\": container with ID starting with 6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809 not found: ID does not exist" containerID="6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.630324 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809"} err="failed to get container status \"6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\": rpc error: code = NotFound desc = could not find container \"6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\": container with ID starting with 6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809 not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.630338 4857 scope.go:117] "RemoveContainer" containerID="f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.633415 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99"} err="failed to get container status \"f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99\": rpc error: code = NotFound desc = could not find container \"f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99\": container with ID starting with f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99 not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.633454 4857 scope.go:117] "RemoveContainer" containerID="4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.634214 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f"} err="failed to get container status \"4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f\": rpc error: code = NotFound desc = could not find container \"4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f\": container with ID starting with 4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.634260 4857 scope.go:117] "RemoveContainer" containerID="29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.634664 4857 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1"} err="failed to get container status \"29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\": rpc error: code = NotFound desc = could not find container \"29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\": container with ID starting with 29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1 not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.634699 4857 scope.go:117] "RemoveContainer" containerID="1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.635095 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6"} err="failed to get container status \"1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\": rpc error: code = NotFound desc = could not find container \"1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\": container with ID starting with 1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6 not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.635128 4857 scope.go:117] "RemoveContainer" containerID="3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.635551 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e"} err="failed to get container status \"3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\": rpc error: code = NotFound desc = could not find container \"3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\": container with ID starting with 3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.635584 4857 scope.go:117] "RemoveContainer" containerID="5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.636206 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee"} err="failed to get container status \"5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\": rpc error: code = NotFound desc = could not find container \"5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\": container with ID starting with 5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.636244 4857 scope.go:117] "RemoveContainer" containerID="ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.636619 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439"} err="failed to get container status \"ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\": rpc error: code = NotFound desc = could not find container \"ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\": container with ID starting with ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439 not found: ID does not exist" Nov 
28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.636654 4857 scope.go:117] "RemoveContainer" containerID="1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.637010 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01"} err="failed to get container status \"1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\": rpc error: code = NotFound desc = could not find container \"1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\": container with ID starting with 1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01 not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.637040 4857 scope.go:117] "RemoveContainer" containerID="0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.637446 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c"} err="failed to get container status \"0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\": rpc error: code = NotFound desc = could not find container \"0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\": container with ID starting with 0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.637481 4857 scope.go:117] "RemoveContainer" containerID="6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.637932 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809"} err="failed to get container status \"6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\": rpc error: code = NotFound desc = could not find container \"6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\": container with ID starting with 6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809 not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.637965 4857 scope.go:117] "RemoveContainer" containerID="f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.638677 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99"} err="failed to get container status \"f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99\": rpc error: code = NotFound desc = could not find container \"f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99\": container with ID starting with f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99 not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.638712 4857 scope.go:117] "RemoveContainer" containerID="4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.639121 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f"} err="failed to get container status 
\"4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f\": rpc error: code = NotFound desc = could not find container \"4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f\": container with ID starting with 4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.639158 4857 scope.go:117] "RemoveContainer" containerID="29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.639477 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1"} err="failed to get container status \"29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\": rpc error: code = NotFound desc = could not find container \"29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\": container with ID starting with 29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1 not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.639499 4857 scope.go:117] "RemoveContainer" containerID="1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.639847 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6"} err="failed to get container status \"1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\": rpc error: code = NotFound desc = could not find container \"1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\": container with ID starting with 1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6 not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.639882 4857 scope.go:117] "RemoveContainer" containerID="3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.640216 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e"} err="failed to get container status \"3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\": rpc error: code = NotFound desc = could not find container \"3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\": container with ID starting with 3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.640234 4857 scope.go:117] "RemoveContainer" containerID="5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.640657 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee"} err="failed to get container status \"5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\": rpc error: code = NotFound desc = could not find container \"5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\": container with ID starting with 5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.640693 4857 scope.go:117] "RemoveContainer" 
containerID="ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.641184 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439"} err="failed to get container status \"ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\": rpc error: code = NotFound desc = could not find container \"ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\": container with ID starting with ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439 not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.641217 4857 scope.go:117] "RemoveContainer" containerID="1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.641523 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01"} err="failed to get container status \"1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\": rpc error: code = NotFound desc = could not find container \"1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\": container with ID starting with 1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01 not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.641539 4857 scope.go:117] "RemoveContainer" containerID="0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.641830 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c"} err="failed to get container status \"0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\": rpc error: code = NotFound desc = could not find container \"0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\": container with ID starting with 0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.641845 4857 scope.go:117] "RemoveContainer" containerID="6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.642178 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809"} err="failed to get container status \"6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\": rpc error: code = NotFound desc = could not find container \"6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\": container with ID starting with 6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809 not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.642212 4857 scope.go:117] "RemoveContainer" containerID="f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.642558 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99"} err="failed to get container status \"f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99\": rpc error: code = NotFound desc = could not find 
container \"f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99\": container with ID starting with f3b298ad3b65d3e9f135b2a6aee92190429888167b714f332b826ba7751d9d99 not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.642594 4857 scope.go:117] "RemoveContainer" containerID="4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.642983 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f"} err="failed to get container status \"4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f\": rpc error: code = NotFound desc = could not find container \"4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f\": container with ID starting with 4aac3f2c5d5f7a87fd848e0d8cac3f5af56ae164768855b47ee7371f1d4a231f not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.643009 4857 scope.go:117] "RemoveContainer" containerID="29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.643292 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1"} err="failed to get container status \"29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\": rpc error: code = NotFound desc = could not find container \"29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1\": container with ID starting with 29ace6ee6a07499efe97e934c47942b5d9786fa1f0568dfd0b42fcedf3ce38f1 not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.643308 4857 scope.go:117] "RemoveContainer" containerID="1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.643548 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6"} err="failed to get container status \"1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\": rpc error: code = NotFound desc = could not find container \"1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6\": container with ID starting with 1e1873428bb710ff1ca5c51cec8da11ca7d8dab84a503f8ad489cc0d682389b6 not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.643565 4857 scope.go:117] "RemoveContainer" containerID="3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.644555 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e"} err="failed to get container status \"3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\": rpc error: code = NotFound desc = could not find container \"3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e\": container with ID starting with 3c0dc0241dd3a69834bbb816feb8d32d4c94dee8b3794974cd63817860438b1e not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.644581 4857 scope.go:117] "RemoveContainer" containerID="5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.644985 4857 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee"} err="failed to get container status \"5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\": rpc error: code = NotFound desc = could not find container \"5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee\": container with ID starting with 5fe9f8981a12b26396d9302c13f339df64228d66d45b6110b14222b657ececee not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.645004 4857 scope.go:117] "RemoveContainer" containerID="ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.645377 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439"} err="failed to get container status \"ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\": rpc error: code = NotFound desc = could not find container \"ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439\": container with ID starting with ffabb1c27de2cc2533c5831794a785a65941d997896d98e7ff67ddcf7334d439 not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.645406 4857 scope.go:117] "RemoveContainer" containerID="1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.645696 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01"} err="failed to get container status \"1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\": rpc error: code = NotFound desc = could not find container \"1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01\": container with ID starting with 1f070b160c5a8de3b704b7df2b8fb22f15d7370bd941b756e452cd61d3253f01 not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.645716 4857 scope.go:117] "RemoveContainer" containerID="0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.646009 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c"} err="failed to get container status \"0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\": rpc error: code = NotFound desc = could not find container \"0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c\": container with ID starting with 0213b5565e0a28f8da8fb8509dd6850a2f558b884edb2144ec6204f75b04478c not found: ID does not exist" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.646027 4857 scope.go:117] "RemoveContainer" containerID="6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809" Nov 28 13:32:13 crc kubenswrapper[4857]: I1128 13:32:13.646302 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809"} err="failed to get container status \"6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\": rpc error: code = NotFound desc = could not find container \"6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809\": container with ID starting with 
6810f5afc47cebec8c402608e7e7c47c231683eb0384b99a30f1267d0d3ec809 not found: ID does not exist" Nov 28 13:32:14 crc kubenswrapper[4857]: I1128 13:32:14.321036 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf74e995-2208-43c6-b89d-10318f55cda8" path="/var/lib/kubelet/pods/bf74e995-2208-43c6-b89d-10318f55cda8/volumes" Nov 28 13:32:14 crc kubenswrapper[4857]: I1128 13:32:14.396117 4857 generic.go:334] "Generic (PLEG): container finished" podID="7c3758fa-bb39-4a7f-878f-7487d2b24513" containerID="8e481824e6c602c434bf07bc3a58136234c9ceda4cb48552ea59be1efbd02538" exitCode=0 Nov 28 13:32:14 crc kubenswrapper[4857]: I1128 13:32:14.396179 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nn575" event={"ID":"7c3758fa-bb39-4a7f-878f-7487d2b24513","Type":"ContainerDied","Data":"8e481824e6c602c434bf07bc3a58136234c9ceda4cb48552ea59be1efbd02538"} Nov 28 13:32:14 crc kubenswrapper[4857]: I1128 13:32:14.396206 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nn575" event={"ID":"7c3758fa-bb39-4a7f-878f-7487d2b24513","Type":"ContainerStarted","Data":"672c2ebc319c7ee98f5b44fa212d2e08d64c8fa213485067bc15a1636bbab200"} Nov 28 13:32:14 crc kubenswrapper[4857]: I1128 13:32:14.400924 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-tzg2g_1031bdc4-d6c6-4425-805b-506069f5667d/kube-multus/2.log" Nov 28 13:32:14 crc kubenswrapper[4857]: I1128 13:32:14.401001 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-tzg2g" event={"ID":"1031bdc4-d6c6-4425-805b-506069f5667d","Type":"ContainerStarted","Data":"26329ea9fed34bbeab807dcb3fc7b3d3cdaf09d5ffb89eb8fb60b8229f2894c4"} Nov 28 13:32:15 crc kubenswrapper[4857]: I1128 13:32:15.410312 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nn575" event={"ID":"7c3758fa-bb39-4a7f-878f-7487d2b24513","Type":"ContainerStarted","Data":"f05f643eb66aadfa0fd660148ac69f11876b6023247da5f3f0dab3f3e1afab92"} Nov 28 13:32:15 crc kubenswrapper[4857]: I1128 13:32:15.410993 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nn575" event={"ID":"7c3758fa-bb39-4a7f-878f-7487d2b24513","Type":"ContainerStarted","Data":"2b7d43423e5f44415cdf5f137c23c8bfd9642ee2bf28115d1f9828d008d5b637"} Nov 28 13:32:15 crc kubenswrapper[4857]: I1128 13:32:15.411008 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nn575" event={"ID":"7c3758fa-bb39-4a7f-878f-7487d2b24513","Type":"ContainerStarted","Data":"52366ce2a4a589e6e6786142cfd3a1b18501731b00df687610e47d99543c8eac"} Nov 28 13:32:15 crc kubenswrapper[4857]: I1128 13:32:15.411019 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nn575" event={"ID":"7c3758fa-bb39-4a7f-878f-7487d2b24513","Type":"ContainerStarted","Data":"969a9d8749f446e78e8ee17d439e0672cf8f0ed6d74907ec000ced636b018b7c"} Nov 28 13:32:15 crc kubenswrapper[4857]: I1128 13:32:15.411031 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nn575" event={"ID":"7c3758fa-bb39-4a7f-878f-7487d2b24513","Type":"ContainerStarted","Data":"3a80fe6bb3780681a7cfc11d0fa5c9d0c629ecd2afb908f48536579b3b1b7d5f"} Nov 28 13:32:16 crc kubenswrapper[4857]: I1128 13:32:16.418701 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nn575" 
event={"ID":"7c3758fa-bb39-4a7f-878f-7487d2b24513","Type":"ContainerStarted","Data":"30eca134013a35a8202f3d4f1df0c6962f51d2c4e0db757d63655a21a13b7721"} Nov 28 13:32:18 crc kubenswrapper[4857]: I1128 13:32:18.438898 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nn575" event={"ID":"7c3758fa-bb39-4a7f-878f-7487d2b24513","Type":"ContainerStarted","Data":"d02f6f8290acb211b80465cba7a4fd32121d3d0c9108c7f10fb52bbd41be0090"} Nov 28 13:32:20 crc kubenswrapper[4857]: I1128 13:32:20.725906 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-nfdxs"] Nov 28 13:32:20 crc kubenswrapper[4857]: I1128 13:32:20.727367 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-nfdxs" Nov 28 13:32:20 crc kubenswrapper[4857]: I1128 13:32:20.729928 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Nov 28 13:32:20 crc kubenswrapper[4857]: I1128 13:32:20.730425 4857 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-jzntr" Nov 28 13:32:20 crc kubenswrapper[4857]: I1128 13:32:20.730941 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Nov 28 13:32:20 crc kubenswrapper[4857]: I1128 13:32:20.731271 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Nov 28 13:32:20 crc kubenswrapper[4857]: I1128 13:32:20.836901 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/584dbb33-6ac4-4e0f-aa70-d5b236c235fc-node-mnt\") pod \"crc-storage-crc-nfdxs\" (UID: \"584dbb33-6ac4-4e0f-aa70-d5b236c235fc\") " pod="crc-storage/crc-storage-crc-nfdxs" Nov 28 13:32:20 crc kubenswrapper[4857]: I1128 13:32:20.837169 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlvq6\" (UniqueName: \"kubernetes.io/projected/584dbb33-6ac4-4e0f-aa70-d5b236c235fc-kube-api-access-vlvq6\") pod \"crc-storage-crc-nfdxs\" (UID: \"584dbb33-6ac4-4e0f-aa70-d5b236c235fc\") " pod="crc-storage/crc-storage-crc-nfdxs" Nov 28 13:32:20 crc kubenswrapper[4857]: I1128 13:32:20.837376 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/584dbb33-6ac4-4e0f-aa70-d5b236c235fc-crc-storage\") pod \"crc-storage-crc-nfdxs\" (UID: \"584dbb33-6ac4-4e0f-aa70-d5b236c235fc\") " pod="crc-storage/crc-storage-crc-nfdxs" Nov 28 13:32:20 crc kubenswrapper[4857]: I1128 13:32:20.938210 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vlvq6\" (UniqueName: \"kubernetes.io/projected/584dbb33-6ac4-4e0f-aa70-d5b236c235fc-kube-api-access-vlvq6\") pod \"crc-storage-crc-nfdxs\" (UID: \"584dbb33-6ac4-4e0f-aa70-d5b236c235fc\") " pod="crc-storage/crc-storage-crc-nfdxs" Nov 28 13:32:20 crc kubenswrapper[4857]: I1128 13:32:20.938291 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/584dbb33-6ac4-4e0f-aa70-d5b236c235fc-crc-storage\") pod \"crc-storage-crc-nfdxs\" (UID: \"584dbb33-6ac4-4e0f-aa70-d5b236c235fc\") " pod="crc-storage/crc-storage-crc-nfdxs" Nov 28 13:32:20 crc kubenswrapper[4857]: I1128 13:32:20.938332 4857 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/584dbb33-6ac4-4e0f-aa70-d5b236c235fc-node-mnt\") pod \"crc-storage-crc-nfdxs\" (UID: \"584dbb33-6ac4-4e0f-aa70-d5b236c235fc\") " pod="crc-storage/crc-storage-crc-nfdxs" Nov 28 13:32:20 crc kubenswrapper[4857]: I1128 13:32:20.938611 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/584dbb33-6ac4-4e0f-aa70-d5b236c235fc-node-mnt\") pod \"crc-storage-crc-nfdxs\" (UID: \"584dbb33-6ac4-4e0f-aa70-d5b236c235fc\") " pod="crc-storage/crc-storage-crc-nfdxs" Nov 28 13:32:20 crc kubenswrapper[4857]: I1128 13:32:20.939744 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/584dbb33-6ac4-4e0f-aa70-d5b236c235fc-crc-storage\") pod \"crc-storage-crc-nfdxs\" (UID: \"584dbb33-6ac4-4e0f-aa70-d5b236c235fc\") " pod="crc-storage/crc-storage-crc-nfdxs" Nov 28 13:32:20 crc kubenswrapper[4857]: I1128 13:32:20.962102 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vlvq6\" (UniqueName: \"kubernetes.io/projected/584dbb33-6ac4-4e0f-aa70-d5b236c235fc-kube-api-access-vlvq6\") pod \"crc-storage-crc-nfdxs\" (UID: \"584dbb33-6ac4-4e0f-aa70-d5b236c235fc\") " pod="crc-storage/crc-storage-crc-nfdxs" Nov 28 13:32:21 crc kubenswrapper[4857]: I1128 13:32:21.057224 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-nfdxs" Nov 28 13:32:21 crc kubenswrapper[4857]: E1128 13:32:21.097140 4857 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-nfdxs_crc-storage_584dbb33-6ac4-4e0f-aa70-d5b236c235fc_0(b9e68a66005e478c9741bef7f65b0cdd3af13a91f1c43920d30438d840eabcaf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 13:32:21 crc kubenswrapper[4857]: E1128 13:32:21.097230 4857 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-nfdxs_crc-storage_584dbb33-6ac4-4e0f-aa70-d5b236c235fc_0(b9e68a66005e478c9741bef7f65b0cdd3af13a91f1c43920d30438d840eabcaf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-nfdxs" Nov 28 13:32:21 crc kubenswrapper[4857]: E1128 13:32:21.097257 4857 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-nfdxs_crc-storage_584dbb33-6ac4-4e0f-aa70-d5b236c235fc_0(b9e68a66005e478c9741bef7f65b0cdd3af13a91f1c43920d30438d840eabcaf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="crc-storage/crc-storage-crc-nfdxs" Nov 28 13:32:21 crc kubenswrapper[4857]: E1128 13:32:21.097325 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-nfdxs_crc-storage(584dbb33-6ac4-4e0f-aa70-d5b236c235fc)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-nfdxs_crc-storage(584dbb33-6ac4-4e0f-aa70-d5b236c235fc)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-nfdxs_crc-storage_584dbb33-6ac4-4e0f-aa70-d5b236c235fc_0(b9e68a66005e478c9741bef7f65b0cdd3af13a91f1c43920d30438d840eabcaf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-nfdxs" podUID="584dbb33-6ac4-4e0f-aa70-d5b236c235fc" Nov 28 13:32:21 crc kubenswrapper[4857]: I1128 13:32:21.468816 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nn575" event={"ID":"7c3758fa-bb39-4a7f-878f-7487d2b24513","Type":"ContainerStarted","Data":"37e2076c3f29d695ee481f98b20295841b6d0da25db82989ac00ada8faeacddf"} Nov 28 13:32:22 crc kubenswrapper[4857]: I1128 13:32:22.042622 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-nn575" podStartSLOduration=9.04259673 podStartE2EDuration="9.04259673s" podCreationTimestamp="2025-11-28 13:32:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:32:21.501241608 +0000 UTC m=+833.528616785" watchObservedRunningTime="2025-11-28 13:32:22.04259673 +0000 UTC m=+834.069971897" Nov 28 13:32:22 crc kubenswrapper[4857]: I1128 13:32:22.046980 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-nfdxs"] Nov 28 13:32:22 crc kubenswrapper[4857]: I1128 13:32:22.047151 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-nfdxs" Nov 28 13:32:22 crc kubenswrapper[4857]: I1128 13:32:22.048211 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-nfdxs" Nov 28 13:32:22 crc kubenswrapper[4857]: E1128 13:32:22.072930 4857 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-nfdxs_crc-storage_584dbb33-6ac4-4e0f-aa70-d5b236c235fc_0(9688a9a56aa637bdb46f204b54eab3255579abe99b990bf25b68e8d060b82bff): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 13:32:22 crc kubenswrapper[4857]: E1128 13:32:22.073074 4857 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-nfdxs_crc-storage_584dbb33-6ac4-4e0f-aa70-d5b236c235fc_0(9688a9a56aa637bdb46f204b54eab3255579abe99b990bf25b68e8d060b82bff): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-nfdxs" Nov 28 13:32:22 crc kubenswrapper[4857]: E1128 13:32:22.073106 4857 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-nfdxs_crc-storage_584dbb33-6ac4-4e0f-aa70-d5b236c235fc_0(9688a9a56aa637bdb46f204b54eab3255579abe99b990bf25b68e8d060b82bff): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="crc-storage/crc-storage-crc-nfdxs" Nov 28 13:32:22 crc kubenswrapper[4857]: E1128 13:32:22.073170 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-nfdxs_crc-storage(584dbb33-6ac4-4e0f-aa70-d5b236c235fc)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-nfdxs_crc-storage(584dbb33-6ac4-4e0f-aa70-d5b236c235fc)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-nfdxs_crc-storage_584dbb33-6ac4-4e0f-aa70-d5b236c235fc_0(9688a9a56aa637bdb46f204b54eab3255579abe99b990bf25b68e8d060b82bff): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-nfdxs" podUID="584dbb33-6ac4-4e0f-aa70-d5b236c235fc" Nov 28 13:32:22 crc kubenswrapper[4857]: I1128 13:32:22.474383 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-nn575" Nov 28 13:32:22 crc kubenswrapper[4857]: I1128 13:32:22.474432 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-nn575" Nov 28 13:32:22 crc kubenswrapper[4857]: I1128 13:32:22.474449 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-nn575" Nov 28 13:32:22 crc kubenswrapper[4857]: I1128 13:32:22.502602 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-nn575" Nov 28 13:32:22 crc kubenswrapper[4857]: I1128 13:32:22.507618 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-nn575" Nov 28 13:32:34 crc kubenswrapper[4857]: I1128 13:32:34.309093 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-nfdxs" Nov 28 13:32:34 crc kubenswrapper[4857]: I1128 13:32:34.310103 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-nfdxs" Nov 28 13:32:34 crc kubenswrapper[4857]: I1128 13:32:34.734006 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-nfdxs"] Nov 28 13:32:34 crc kubenswrapper[4857]: I1128 13:32:34.738396 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 13:32:35 crc kubenswrapper[4857]: I1128 13:32:35.561079 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-nfdxs" event={"ID":"584dbb33-6ac4-4e0f-aa70-d5b236c235fc","Type":"ContainerStarted","Data":"5b36c91a220ad15ca8189b5019a1d76baf522a5adbddf28d6f0c5026f3c8ccf1"} Nov 28 13:32:36 crc kubenswrapper[4857]: I1128 13:32:36.567148 4857 generic.go:334] "Generic (PLEG): container finished" podID="584dbb33-6ac4-4e0f-aa70-d5b236c235fc" containerID="00e763ac29aad3af4762ca151dfa9d0fded7c9b8b122a7ca21f7b3c9853b9314" exitCode=0 Nov 28 13:32:36 crc kubenswrapper[4857]: I1128 13:32:36.567201 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-nfdxs" event={"ID":"584dbb33-6ac4-4e0f-aa70-d5b236c235fc","Type":"ContainerDied","Data":"00e763ac29aad3af4762ca151dfa9d0fded7c9b8b122a7ca21f7b3c9853b9314"} Nov 28 13:32:37 crc kubenswrapper[4857]: I1128 13:32:37.909432 4857 util.go:48] "No ready sandbox for pod can be found. 
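[Annotation] Both RunPodSandbox failures above share one root cause: "no CNI configuration file in /etc/kubernetes/cni/net.d/". The network provider (ovnkube-node here) had not yet written its CNI config; once it reported ready at 13:32:22, the retry at 13:32:34 succeeds. A small Go sketch of the kind of check behind that error message; the real runtime loads configs via libcni, so this file scan is only an approximation.

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// cniConfigPresent reports whether the CNI config directory contains any
// candidate network configuration file.
func cniConfigPresent(dir string) (bool, error) {
    entries, err := os.ReadDir(dir)
    if err != nil {
        return false, err
    }
    for _, e := range entries {
        switch filepath.Ext(e.Name()) {
        case ".conf", ".conflist", ".json": // extensions libcni accepts
            return true, nil
        }
    }
    return false, nil
}

func main() {
    ok, err := cniConfigPresent("/etc/kubernetes/cni/net.d")
    if err != nil || !ok {
        fmt.Println("no CNI configuration file; has your network provider started?")
    }
}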
Nov 28 13:32:37 crc kubenswrapper[4857]: I1128 13:32:37.971910 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/584dbb33-6ac4-4e0f-aa70-d5b236c235fc-node-mnt\") pod \"584dbb33-6ac4-4e0f-aa70-d5b236c235fc\" (UID: \"584dbb33-6ac4-4e0f-aa70-d5b236c235fc\") "
Nov 28 13:32:37 crc kubenswrapper[4857]: I1128 13:32:37.972042 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/584dbb33-6ac4-4e0f-aa70-d5b236c235fc-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "584dbb33-6ac4-4e0f-aa70-d5b236c235fc" (UID: "584dbb33-6ac4-4e0f-aa70-d5b236c235fc"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 13:32:37 crc kubenswrapper[4857]: I1128 13:32:37.972061 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vlvq6\" (UniqueName: \"kubernetes.io/projected/584dbb33-6ac4-4e0f-aa70-d5b236c235fc-kube-api-access-vlvq6\") pod \"584dbb33-6ac4-4e0f-aa70-d5b236c235fc\" (UID: \"584dbb33-6ac4-4e0f-aa70-d5b236c235fc\") "
Nov 28 13:32:37 crc kubenswrapper[4857]: I1128 13:32:37.972133 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/584dbb33-6ac4-4e0f-aa70-d5b236c235fc-crc-storage\") pod \"584dbb33-6ac4-4e0f-aa70-d5b236c235fc\" (UID: \"584dbb33-6ac4-4e0f-aa70-d5b236c235fc\") "
Nov 28 13:32:37 crc kubenswrapper[4857]: I1128 13:32:37.972552 4857 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/584dbb33-6ac4-4e0f-aa70-d5b236c235fc-node-mnt\") on node \"crc\" DevicePath \"\""
Nov 28 13:32:37 crc kubenswrapper[4857]: I1128 13:32:37.976206 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/584dbb33-6ac4-4e0f-aa70-d5b236c235fc-kube-api-access-vlvq6" (OuterVolumeSpecName: "kube-api-access-vlvq6") pod "584dbb33-6ac4-4e0f-aa70-d5b236c235fc" (UID: "584dbb33-6ac4-4e0f-aa70-d5b236c235fc"). InnerVolumeSpecName "kube-api-access-vlvq6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:32:37 crc kubenswrapper[4857]: I1128 13:32:37.983774 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/584dbb33-6ac4-4e0f-aa70-d5b236c235fc-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "584dbb33-6ac4-4e0f-aa70-d5b236c235fc" (UID: "584dbb33-6ac4-4e0f-aa70-d5b236c235fc"). InnerVolumeSpecName "crc-storage". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:32:38 crc kubenswrapper[4857]: I1128 13:32:38.073451 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vlvq6\" (UniqueName: \"kubernetes.io/projected/584dbb33-6ac4-4e0f-aa70-d5b236c235fc-kube-api-access-vlvq6\") on node \"crc\" DevicePath \"\""
Nov 28 13:32:38 crc kubenswrapper[4857]: I1128 13:32:38.073483 4857 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/584dbb33-6ac4-4e0f-aa70-d5b236c235fc-crc-storage\") on node \"crc\" DevicePath \"\""
Nov 28 13:32:38 crc kubenswrapper[4857]: I1128 13:32:38.585305 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-nfdxs" event={"ID":"584dbb33-6ac4-4e0f-aa70-d5b236c235fc","Type":"ContainerDied","Data":"5b36c91a220ad15ca8189b5019a1d76baf522a5adbddf28d6f0c5026f3c8ccf1"}
Nov 28 13:32:38 crc kubenswrapper[4857]: I1128 13:32:38.585356 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5b36c91a220ad15ca8189b5019a1d76baf522a5adbddf28d6f0c5026f3c8ccf1"
Nov 28 13:32:38 crc kubenswrapper[4857]: I1128 13:32:38.585436 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-nfdxs"
Nov 28 13:32:43 crc kubenswrapper[4857]: I1128 13:32:43.506106 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-nn575"
Nov 28 13:32:45 crc kubenswrapper[4857]: I1128 13:32:45.548493 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2"]
Nov 28 13:32:45 crc kubenswrapper[4857]: E1128 13:32:45.549274 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="584dbb33-6ac4-4e0f-aa70-d5b236c235fc" containerName="storage"
Nov 28 13:32:45 crc kubenswrapper[4857]: I1128 13:32:45.549302 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="584dbb33-6ac4-4e0f-aa70-d5b236c235fc" containerName="storage"
Nov 28 13:32:45 crc kubenswrapper[4857]: I1128 13:32:45.549530 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="584dbb33-6ac4-4e0f-aa70-d5b236c235fc" containerName="storage"
Nov 28 13:32:45 crc kubenswrapper[4857]: I1128 13:32:45.551052 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2"
Nov 28 13:32:45 crc kubenswrapper[4857]: I1128 13:32:45.552710 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Nov 28 13:32:45 crc kubenswrapper[4857]: I1128 13:32:45.554551 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2"]
Nov 28 13:32:45 crc kubenswrapper[4857]: I1128 13:32:45.679162 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2b673ee4-31c9-4a17-a188-5aa63017fcf7-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2\" (UID: \"2b673ee4-31c9-4a17-a188-5aa63017fcf7\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2"
Nov 28 13:32:45 crc kubenswrapper[4857]: I1128 13:32:45.679240 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r86pz\" (UniqueName: \"kubernetes.io/projected/2b673ee4-31c9-4a17-a188-5aa63017fcf7-kube-api-access-r86pz\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2\" (UID: \"2b673ee4-31c9-4a17-a188-5aa63017fcf7\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2"
Nov 28 13:32:45 crc kubenswrapper[4857]: I1128 13:32:45.679270 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2b673ee4-31c9-4a17-a188-5aa63017fcf7-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2\" (UID: \"2b673ee4-31c9-4a17-a188-5aa63017fcf7\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2"
Nov 28 13:32:45 crc kubenswrapper[4857]: I1128 13:32:45.780874 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2b673ee4-31c9-4a17-a188-5aa63017fcf7-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2\" (UID: \"2b673ee4-31c9-4a17-a188-5aa63017fcf7\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2"
Nov 28 13:32:45 crc kubenswrapper[4857]: I1128 13:32:45.781425 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r86pz\" (UniqueName: \"kubernetes.io/projected/2b673ee4-31c9-4a17-a188-5aa63017fcf7-kube-api-access-r86pz\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2\" (UID: \"2b673ee4-31c9-4a17-a188-5aa63017fcf7\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2"
Nov 28 13:32:45 crc kubenswrapper[4857]: I1128 13:32:45.781523 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2b673ee4-31c9-4a17-a188-5aa63017fcf7-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2\" (UID: \"2b673ee4-31c9-4a17-a188-5aa63017fcf7\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2"
Nov 28 13:32:45 crc kubenswrapper[4857]: I1128 13:32:45.781427 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2b673ee4-31c9-4a17-a188-5aa63017fcf7-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2\" (UID: \"2b673ee4-31c9-4a17-a188-5aa63017fcf7\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2"
Nov 28 13:32:45 crc kubenswrapper[4857]: I1128 13:32:45.782023 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2b673ee4-31c9-4a17-a188-5aa63017fcf7-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2\" (UID: \"2b673ee4-31c9-4a17-a188-5aa63017fcf7\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2"
Nov 28 13:32:45 crc kubenswrapper[4857]: I1128 13:32:45.798561 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r86pz\" (UniqueName: \"kubernetes.io/projected/2b673ee4-31c9-4a17-a188-5aa63017fcf7-kube-api-access-r86pz\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2\" (UID: \"2b673ee4-31c9-4a17-a188-5aa63017fcf7\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2"
Nov 28 13:32:45 crc kubenswrapper[4857]: I1128 13:32:45.901640 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2"
Nov 28 13:32:46 crc kubenswrapper[4857]: I1128 13:32:46.126107 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2"]
Nov 28 13:32:46 crc kubenswrapper[4857]: I1128 13:32:46.629284 4857 generic.go:334] "Generic (PLEG): container finished" podID="2b673ee4-31c9-4a17-a188-5aa63017fcf7" containerID="5749ec067a7b4fe7e256193403c3a5bf5269947e9f009a603ce60145731e4b5f" exitCode=0
Nov 28 13:32:46 crc kubenswrapper[4857]: I1128 13:32:46.629344 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2" event={"ID":"2b673ee4-31c9-4a17-a188-5aa63017fcf7","Type":"ContainerDied","Data":"5749ec067a7b4fe7e256193403c3a5bf5269947e9f009a603ce60145731e4b5f"}
Nov 28 13:32:46 crc kubenswrapper[4857]: I1128 13:32:46.629578 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2" event={"ID":"2b673ee4-31c9-4a17-a188-5aa63017fcf7","Type":"ContainerStarted","Data":"11f7b14c13759b774ed73f1d50d541476f6282c11e5d13b357fa2186a28c1e84"}
Nov 28 13:32:47 crc kubenswrapper[4857]: I1128 13:32:47.910735 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-k2r86"]
Nov 28 13:32:47 crc kubenswrapper[4857]: I1128 13:32:47.912550 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-k2r86"
Nov 28 13:32:47 crc kubenswrapper[4857]: I1128 13:32:47.964559 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-k2r86"]
Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.114086 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f529d1db-433d-4505-9436-5f0b88340ec7-catalog-content\") pod \"redhat-operators-k2r86\" (UID: \"f529d1db-433d-4505-9436-5f0b88340ec7\") " pod="openshift-marketplace/redhat-operators-k2r86"
Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.114245 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2hj8h\" (UniqueName: \"kubernetes.io/projected/f529d1db-433d-4505-9436-5f0b88340ec7-kube-api-access-2hj8h\") pod \"redhat-operators-k2r86\" (UID: \"f529d1db-433d-4505-9436-5f0b88340ec7\") " pod="openshift-marketplace/redhat-operators-k2r86"
Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.114321 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f529d1db-433d-4505-9436-5f0b88340ec7-utilities\") pod \"redhat-operators-k2r86\" (UID: \"f529d1db-433d-4505-9436-5f0b88340ec7\") " pod="openshift-marketplace/redhat-operators-k2r86"
Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.214977 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f529d1db-433d-4505-9436-5f0b88340ec7-utilities\") pod \"redhat-operators-k2r86\" (UID: \"f529d1db-433d-4505-9436-5f0b88340ec7\") " pod="openshift-marketplace/redhat-operators-k2r86"
Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.215026 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f529d1db-433d-4505-9436-5f0b88340ec7-catalog-content\") pod \"redhat-operators-k2r86\" (UID: \"f529d1db-433d-4505-9436-5f0b88340ec7\") " pod="openshift-marketplace/redhat-operators-k2r86"
Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.215055 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2hj8h\" (UniqueName: \"kubernetes.io/projected/f529d1db-433d-4505-9436-5f0b88340ec7-kube-api-access-2hj8h\") pod \"redhat-operators-k2r86\" (UID: \"f529d1db-433d-4505-9436-5f0b88340ec7\") " pod="openshift-marketplace/redhat-operators-k2r86"
Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.215791 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f529d1db-433d-4505-9436-5f0b88340ec7-catalog-content\") pod \"redhat-operators-k2r86\" (UID: \"f529d1db-433d-4505-9436-5f0b88340ec7\") " pod="openshift-marketplace/redhat-operators-k2r86"
Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.215785 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f529d1db-433d-4505-9436-5f0b88340ec7-utilities\") pod \"redhat-operators-k2r86\" (UID: \"f529d1db-433d-4505-9436-5f0b88340ec7\") " pod="openshift-marketplace/redhat-operators-k2r86"
Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.249535 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2hj8h\" (UniqueName: \"kubernetes.io/projected/f529d1db-433d-4505-9436-5f0b88340ec7-kube-api-access-2hj8h\") pod \"redhat-operators-k2r86\" (UID: \"f529d1db-433d-4505-9436-5f0b88340ec7\") " pod="openshift-marketplace/redhat-operators-k2r86"
Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.271344 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-k2r86"
Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.450871 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-k2r86"]
Nov 28 13:32:48 crc kubenswrapper[4857]: W1128 13:32:48.452213 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf529d1db_433d_4505_9436_5f0b88340ec7.slice/crio-d100c5b0f6437b250a6c7ef032d7f2c5e008b09e883695598db504ffdf0e99cc WatchSource:0}: Error finding container d100c5b0f6437b250a6c7ef032d7f2c5e008b09e883695598db504ffdf0e99cc: Status 404 returned error can't find the container with id d100c5b0f6437b250a6c7ef032d7f2c5e008b09e883695598db504ffdf0e99cc
Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.645596 4857 generic.go:334] "Generic (PLEG): container finished" podID="2b673ee4-31c9-4a17-a188-5aa63017fcf7" containerID="b53f7320e7b293d86f3ec11d78bbb463bdaaaa64ea5fb78062c3c5f42722e06d" exitCode=0
Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.645670 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2" event={"ID":"2b673ee4-31c9-4a17-a188-5aa63017fcf7","Type":"ContainerDied","Data":"b53f7320e7b293d86f3ec11d78bbb463bdaaaa64ea5fb78062c3c5f42722e06d"}
Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.647172 4857 generic.go:334] "Generic (PLEG): container finished" podID="f529d1db-433d-4505-9436-5f0b88340ec7" containerID="cc021b8b71750270aa7bc8af104a98ca9d5896f0e5f98099195ca3640e3e6c64" exitCode=0
Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.647195 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k2r86" event={"ID":"f529d1db-433d-4505-9436-5f0b88340ec7","Type":"ContainerDied","Data":"cc021b8b71750270aa7bc8af104a98ca9d5896f0e5f98099195ca3640e3e6c64"}
Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.647209 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k2r86" event={"ID":"f529d1db-433d-4505-9436-5f0b88340ec7","Type":"ContainerStarted","Data":"d100c5b0f6437b250a6c7ef032d7f2c5e008b09e883695598db504ffdf0e99cc"}
Nov 28 13:32:49 crc kubenswrapper[4857]: I1128 13:32:49.655706 4857 generic.go:334] "Generic (PLEG): container finished" podID="2b673ee4-31c9-4a17-a188-5aa63017fcf7" containerID="4334a5b02b31f1c3b65461272cbd49232d6805812d67d8502fb8599b338bad29" exitCode=0
Nov 28 13:32:49 crc kubenswrapper[4857]: I1128 13:32:49.656354 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2" event={"ID":"2b673ee4-31c9-4a17-a188-5aa63017fcf7","Type":"ContainerDied","Data":"4334a5b02b31f1c3b65461272cbd49232d6805812d67d8502fb8599b338bad29"}
Nov 28 13:32:49 crc kubenswrapper[4857]: I1128 13:32:49.659508 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k2r86" event={"ID":"f529d1db-433d-4505-9436-5f0b88340ec7","Type":"ContainerStarted","Data":"54ae539fd241fe10f7a6f886d7ce0d0e00d7f6e32b5853dc19f22bfb7586bccd"}
Nov 28 13:32:50 crc kubenswrapper[4857]: I1128 13:32:50.669702 4857 generic.go:334] "Generic (PLEG): container finished" podID="f529d1db-433d-4505-9436-5f0b88340ec7" containerID="54ae539fd241fe10f7a6f886d7ce0d0e00d7f6e32b5853dc19f22bfb7586bccd" exitCode=0
Nov 28 13:32:50 crc kubenswrapper[4857]: I1128 13:32:50.670242 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k2r86" event={"ID":"f529d1db-433d-4505-9436-5f0b88340ec7","Type":"ContainerDied","Data":"54ae539fd241fe10f7a6f886d7ce0d0e00d7f6e32b5853dc19f22bfb7586bccd"}
Nov 28 13:32:51 crc kubenswrapper[4857]: I1128 13:32:51.125614 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2"
Nov 28 13:32:51 crc kubenswrapper[4857]: I1128 13:32:51.170653 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r86pz\" (UniqueName: \"kubernetes.io/projected/2b673ee4-31c9-4a17-a188-5aa63017fcf7-kube-api-access-r86pz\") pod \"2b673ee4-31c9-4a17-a188-5aa63017fcf7\" (UID: \"2b673ee4-31c9-4a17-a188-5aa63017fcf7\") "
Nov 28 13:32:51 crc kubenswrapper[4857]: I1128 13:32:51.170803 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2b673ee4-31c9-4a17-a188-5aa63017fcf7-util\") pod \"2b673ee4-31c9-4a17-a188-5aa63017fcf7\" (UID: \"2b673ee4-31c9-4a17-a188-5aa63017fcf7\") "
Nov 28 13:32:51 crc kubenswrapper[4857]: I1128 13:32:51.170861 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2b673ee4-31c9-4a17-a188-5aa63017fcf7-bundle\") pod \"2b673ee4-31c9-4a17-a188-5aa63017fcf7\" (UID: \"2b673ee4-31c9-4a17-a188-5aa63017fcf7\") "
Nov 28 13:32:51 crc kubenswrapper[4857]: I1128 13:32:51.171620 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b673ee4-31c9-4a17-a188-5aa63017fcf7-bundle" (OuterVolumeSpecName: "bundle") pod "2b673ee4-31c9-4a17-a188-5aa63017fcf7" (UID: "2b673ee4-31c9-4a17-a188-5aa63017fcf7"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:32:51 crc kubenswrapper[4857]: I1128 13:32:51.177629 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b673ee4-31c9-4a17-a188-5aa63017fcf7-kube-api-access-r86pz" (OuterVolumeSpecName: "kube-api-access-r86pz") pod "2b673ee4-31c9-4a17-a188-5aa63017fcf7" (UID: "2b673ee4-31c9-4a17-a188-5aa63017fcf7"). InnerVolumeSpecName "kube-api-access-r86pz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:32:51 crc kubenswrapper[4857]: I1128 13:32:51.193667 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b673ee4-31c9-4a17-a188-5aa63017fcf7-util" (OuterVolumeSpecName: "util") pod "2b673ee4-31c9-4a17-a188-5aa63017fcf7" (UID: "2b673ee4-31c9-4a17-a188-5aa63017fcf7"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:32:51 crc kubenswrapper[4857]: I1128 13:32:51.272072 4857 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2b673ee4-31c9-4a17-a188-5aa63017fcf7-util\") on node \"crc\" DevicePath \"\""
Nov 28 13:32:51 crc kubenswrapper[4857]: I1128 13:32:51.272100 4857 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2b673ee4-31c9-4a17-a188-5aa63017fcf7-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:32:51 crc kubenswrapper[4857]: I1128 13:32:51.272115 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r86pz\" (UniqueName: \"kubernetes.io/projected/2b673ee4-31c9-4a17-a188-5aa63017fcf7-kube-api-access-r86pz\") on node \"crc\" DevicePath \"\""
Nov 28 13:32:51 crc kubenswrapper[4857]: I1128 13:32:51.680056 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2" event={"ID":"2b673ee4-31c9-4a17-a188-5aa63017fcf7","Type":"ContainerDied","Data":"11f7b14c13759b774ed73f1d50d541476f6282c11e5d13b357fa2186a28c1e84"}
Nov 28 13:32:51 crc kubenswrapper[4857]: I1128 13:32:51.680390 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="11f7b14c13759b774ed73f1d50d541476f6282c11e5d13b357fa2186a28c1e84"
Nov 28 13:32:51 crc kubenswrapper[4857]: I1128 13:32:51.680087 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2"
Nov 28 13:32:51 crc kubenswrapper[4857]: I1128 13:32:51.684891 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k2r86" event={"ID":"f529d1db-433d-4505-9436-5f0b88340ec7","Type":"ContainerStarted","Data":"ec901324e0323cb29de5bc7e5330a4a51a3849396102c6f4abbbe08fb10c6d18"}
Nov 28 13:32:51 crc kubenswrapper[4857]: I1128 13:32:51.705048 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-k2r86" podStartSLOduration=2.186907964 podStartE2EDuration="4.705026481s" podCreationTimestamp="2025-11-28 13:32:47 +0000 UTC" firstStartedPulling="2025-11-28 13:32:48.648219791 +0000 UTC m=+860.675594958" lastFinishedPulling="2025-11-28 13:32:51.166338298 +0000 UTC m=+863.193713475" observedRunningTime="2025-11-28 13:32:51.702339035 +0000 UTC m=+863.729714232" watchObservedRunningTime="2025-11-28 13:32:51.705026481 +0000 UTC m=+863.732401658"
Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.417445 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-ds5zt"]
Nov 28 13:32:53 crc kubenswrapper[4857]: E1128 13:32:53.417696 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b673ee4-31c9-4a17-a188-5aa63017fcf7" containerName="pull"
Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.417713 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b673ee4-31c9-4a17-a188-5aa63017fcf7" containerName="pull"
Nov 28 13:32:53 crc kubenswrapper[4857]: E1128 13:32:53.417729 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b673ee4-31c9-4a17-a188-5aa63017fcf7" containerName="util"
Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.417736 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b673ee4-31c9-4a17-a188-5aa63017fcf7" containerName="util"
Nov 28 13:32:53 crc kubenswrapper[4857]: E1128 13:32:53.417770 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b673ee4-31c9-4a17-a188-5aa63017fcf7" containerName="extract"
Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.417780 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b673ee4-31c9-4a17-a188-5aa63017fcf7" containerName="extract"
Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.417914 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b673ee4-31c9-4a17-a188-5aa63017fcf7" containerName="extract"
Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.418357 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-ds5zt"
Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.420356 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt"
Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.420476 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt"
Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.425929 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-ds5zt"]
Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.429652 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-4c9mg"
Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.516529 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4b62l\" (UniqueName: \"kubernetes.io/projected/fcfc3e02-2c67-4436-9719-fc3fe00bb2e0-kube-api-access-4b62l\") pod \"nmstate-operator-5b5b58f5c8-ds5zt\" (UID: \"fcfc3e02-2c67-4436-9719-fc3fe00bb2e0\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-ds5zt"
Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.617993 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4b62l\" (UniqueName: \"kubernetes.io/projected/fcfc3e02-2c67-4436-9719-fc3fe00bb2e0-kube-api-access-4b62l\") pod \"nmstate-operator-5b5b58f5c8-ds5zt\" (UID: \"fcfc3e02-2c67-4436-9719-fc3fe00bb2e0\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-ds5zt"
Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.635662 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4b62l\" (UniqueName: \"kubernetes.io/projected/fcfc3e02-2c67-4436-9719-fc3fe00bb2e0-kube-api-access-4b62l\") pod \"nmstate-operator-5b5b58f5c8-ds5zt\" (UID: \"fcfc3e02-2c67-4436-9719-fc3fe00bb2e0\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-ds5zt"
Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.733408 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-ds5zt"
Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.225995 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-ds5zt"]
Nov 28 13:32:54 crc kubenswrapper[4857]: W1128 13:32:54.241185 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfcfc3e02_2c67_4436_9719_fc3fe00bb2e0.slice/crio-87cb2b8417d5ddf66756d4984eaed0816a357c7a8c2149afd7e2a3f8154f21ef WatchSource:0}: Error finding container 87cb2b8417d5ddf66756d4984eaed0816a357c7a8c2149afd7e2a3f8154f21ef: Status 404 returned error can't find the container with id 87cb2b8417d5ddf66756d4984eaed0816a357c7a8c2149afd7e2a3f8154f21ef
Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.700110 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-ds5zt" event={"ID":"fcfc3e02-2c67-4436-9719-fc3fe00bb2e0","Type":"ContainerStarted","Data":"87cb2b8417d5ddf66756d4984eaed0816a357c7a8c2149afd7e2a3f8154f21ef"}
Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.272258 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-k2r86"
Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.272575 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-k2r86"
Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.331141 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-k2r86"
Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.732149 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-ds5zt" event={"ID":"fcfc3e02-2c67-4436-9719-fc3fe00bb2e0","Type":"ContainerStarted","Data":"d803fc953d6cbc7ec2d613922317f22c2c84e8405b6f35b1ec52a8f855582603"}
Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.758646 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-ds5zt" podStartSLOduration=1.80783623 podStartE2EDuration="5.75862412s" podCreationTimestamp="2025-11-28 13:32:53 +0000 UTC" firstStartedPulling="2025-11-28 13:32:54.242944187 +0000 UTC m=+866.270319364" lastFinishedPulling="2025-11-28 13:32:58.193732087 +0000 UTC m=+870.221107254" observedRunningTime="2025-11-28 13:32:58.752504187 +0000 UTC m=+870.779879364" watchObservedRunningTime="2025-11-28 13:32:58.75862412 +0000 UTC m=+870.785999287"
Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.829993 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-k2r86"
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.496101 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-k2r86"]
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.716931 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-qlhzf"]
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.718075 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-qlhzf"
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.720372 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-9kv7q"
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.724002 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xchwf"]
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.725062 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xchwf"
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.726954 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook"
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.738685 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-qlhzf"]
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.775636 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-njxfb"]
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.776828 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-njxfb"
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.784570 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xchwf"]
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.815373 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rhdf\" (UniqueName: \"kubernetes.io/projected/07c3f6d4-5b62-4140-a9d8-a5f26841a487-kube-api-access-9rhdf\") pod \"nmstate-metrics-7f946cbc9-qlhzf\" (UID: \"07c3f6d4-5b62-4140-a9d8-a5f26841a487\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-qlhzf"
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.815451 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lk2hn\" (UniqueName: \"kubernetes.io/projected/8aad4947-867d-42d1-af5d-d9d8284d21e7-kube-api-access-lk2hn\") pod \"nmstate-webhook-5f6d4c5ccb-xchwf\" (UID: \"8aad4947-867d-42d1-af5d-d9d8284d21e7\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xchwf"
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.815502 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/8aad4947-867d-42d1-af5d-d9d8284d21e7-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-xchwf\" (UID: \"8aad4947-867d-42d1-af5d-d9d8284d21e7\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xchwf"
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.882584 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-ht4fl"]
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.885694 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-ht4fl"
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.888226 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert"
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.888467 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-klgxj"
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.888334 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf"
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.896496 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-ht4fl"]
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.918305 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/146db7a4-f901-4160-8053-7fe4f8063e22-nmstate-lock\") pod \"nmstate-handler-njxfb\" (UID: \"146db7a4-f901-4160-8053-7fe4f8063e22\") " pod="openshift-nmstate/nmstate-handler-njxfb"
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.918368 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/146db7a4-f901-4160-8053-7fe4f8063e22-ovs-socket\") pod \"nmstate-handler-njxfb\" (UID: \"146db7a4-f901-4160-8053-7fe4f8063e22\") " pod="openshift-nmstate/nmstate-handler-njxfb"
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.918412 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rhdf\" (UniqueName: \"kubernetes.io/projected/07c3f6d4-5b62-4140-a9d8-a5f26841a487-kube-api-access-9rhdf\") pod \"nmstate-metrics-7f946cbc9-qlhzf\" (UID: \"07c3f6d4-5b62-4140-a9d8-a5f26841a487\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-qlhzf"
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.918454 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lk2hn\" (UniqueName: \"kubernetes.io/projected/8aad4947-867d-42d1-af5d-d9d8284d21e7-kube-api-access-lk2hn\") pod \"nmstate-webhook-5f6d4c5ccb-xchwf\" (UID: \"8aad4947-867d-42d1-af5d-d9d8284d21e7\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xchwf"
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.918479 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8n27b\" (UniqueName: \"kubernetes.io/projected/146db7a4-f901-4160-8053-7fe4f8063e22-kube-api-access-8n27b\") pod \"nmstate-handler-njxfb\" (UID: \"146db7a4-f901-4160-8053-7fe4f8063e22\") " pod="openshift-nmstate/nmstate-handler-njxfb"
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.918532 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/8aad4947-867d-42d1-af5d-d9d8284d21e7-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-xchwf\" (UID: \"8aad4947-867d-42d1-af5d-d9d8284d21e7\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xchwf"
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.918576 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/146db7a4-f901-4160-8053-7fe4f8063e22-dbus-socket\") pod \"nmstate-handler-njxfb\" (UID: \"146db7a4-f901-4160-8053-7fe4f8063e22\") " pod="openshift-nmstate/nmstate-handler-njxfb"
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.943895 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/8aad4947-867d-42d1-af5d-d9d8284d21e7-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-xchwf\" (UID: \"8aad4947-867d-42d1-af5d-d9d8284d21e7\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xchwf"
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.947952 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rhdf\" (UniqueName: \"kubernetes.io/projected/07c3f6d4-5b62-4140-a9d8-a5f26841a487-kube-api-access-9rhdf\") pod \"nmstate-metrics-7f946cbc9-qlhzf\" (UID: \"07c3f6d4-5b62-4140-a9d8-a5f26841a487\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-qlhzf"
Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.956871 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lk2hn\" (UniqueName: \"kubernetes.io/projected/8aad4947-867d-42d1-af5d-d9d8284d21e7-kube-api-access-lk2hn\") pod \"nmstate-webhook-5f6d4c5ccb-xchwf\" (UID: \"8aad4947-867d-42d1-af5d-d9d8284d21e7\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xchwf"
Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.020036 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/3528f191-ca62-461c-a93f-8fb8758d76af-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-ht4fl\" (UID: \"3528f191-ca62-461c-a93f-8fb8758d76af\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-ht4fl"
Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.020090 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/146db7a4-f901-4160-8053-7fe4f8063e22-ovs-socket\") pod \"nmstate-handler-njxfb\" (UID: \"146db7a4-f901-4160-8053-7fe4f8063e22\") " pod="openshift-nmstate/nmstate-handler-njxfb"
Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.020131 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8n27b\" (UniqueName: \"kubernetes.io/projected/146db7a4-f901-4160-8053-7fe4f8063e22-kube-api-access-8n27b\") pod \"nmstate-handler-njxfb\" (UID: \"146db7a4-f901-4160-8053-7fe4f8063e22\") " pod="openshift-nmstate/nmstate-handler-njxfb"
Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.020158 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/3528f191-ca62-461c-a93f-8fb8758d76af-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-ht4fl\" (UID: \"3528f191-ca62-461c-a93f-8fb8758d76af\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-ht4fl"
Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.020184 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/146db7a4-f901-4160-8053-7fe4f8063e22-dbus-socket\") pod \"nmstate-handler-njxfb\" (UID: \"146db7a4-f901-4160-8053-7fe4f8063e22\") " pod="openshift-nmstate/nmstate-handler-njxfb"
Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.020213 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/146db7a4-f901-4160-8053-7fe4f8063e22-nmstate-lock\") pod \"nmstate-handler-njxfb\" (UID: \"146db7a4-f901-4160-8053-7fe4f8063e22\") " pod="openshift-nmstate/nmstate-handler-njxfb"
Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.020234 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c65ch\" (UniqueName: \"kubernetes.io/projected/3528f191-ca62-461c-a93f-8fb8758d76af-kube-api-access-c65ch\") pod \"nmstate-console-plugin-7fbb5f6569-ht4fl\" (UID: \"3528f191-ca62-461c-a93f-8fb8758d76af\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-ht4fl"
Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.020314 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/146db7a4-f901-4160-8053-7fe4f8063e22-ovs-socket\") pod \"nmstate-handler-njxfb\" (UID: \"146db7a4-f901-4160-8053-7fe4f8063e22\") " pod="openshift-nmstate/nmstate-handler-njxfb"
Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.020841 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/146db7a4-f901-4160-8053-7fe4f8063e22-dbus-socket\") pod \"nmstate-handler-njxfb\" (UID: \"146db7a4-f901-4160-8053-7fe4f8063e22\") " pod="openshift-nmstate/nmstate-handler-njxfb"
Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.020889 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/146db7a4-f901-4160-8053-7fe4f8063e22-nmstate-lock\") pod \"nmstate-handler-njxfb\" (UID: \"146db7a4-f901-4160-8053-7fe4f8063e22\") " pod="openshift-nmstate/nmstate-handler-njxfb"
Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.035998 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8n27b\" (UniqueName: \"kubernetes.io/projected/146db7a4-f901-4160-8053-7fe4f8063e22-kube-api-access-8n27b\") pod \"nmstate-handler-njxfb\" (UID: \"146db7a4-f901-4160-8053-7fe4f8063e22\") " pod="openshift-nmstate/nmstate-handler-njxfb"
Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.040583 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-qlhzf"
Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.060525 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xchwf"
Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.083040 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-8d58ccdfb-bszcc"]
Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.083804 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8d58ccdfb-bszcc"
Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.097293 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-njxfb"
Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.103608 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-8d58ccdfb-bszcc"]
Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.124132 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c65ch\" (UniqueName: \"kubernetes.io/projected/3528f191-ca62-461c-a93f-8fb8758d76af-kube-api-access-c65ch\") pod \"nmstate-console-plugin-7fbb5f6569-ht4fl\" (UID: \"3528f191-ca62-461c-a93f-8fb8758d76af\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-ht4fl"
Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.124194 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/3528f191-ca62-461c-a93f-8fb8758d76af-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-ht4fl\" (UID: \"3528f191-ca62-461c-a93f-8fb8758d76af\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-ht4fl"
Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.124265 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/3528f191-ca62-461c-a93f-8fb8758d76af-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-ht4fl\" (UID: \"3528f191-ca62-461c-a93f-8fb8758d76af\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-ht4fl"
Nov 28 13:33:00 crc kubenswrapper[4857]: E1128 13:33:00.124498 4857 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found
Nov 28 13:33:00 crc kubenswrapper[4857]: E1128 13:33:00.124585 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3528f191-ca62-461c-a93f-8fb8758d76af-plugin-serving-cert podName:3528f191-ca62-461c-a93f-8fb8758d76af nodeName:}" failed. No retries permitted until 2025-11-28 13:33:00.62455975 +0000 UTC m=+872.651934917 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/3528f191-ca62-461c-a93f-8fb8758d76af-plugin-serving-cert") pod "nmstate-console-plugin-7fbb5f6569-ht4fl" (UID: "3528f191-ca62-461c-a93f-8fb8758d76af") : secret "plugin-serving-cert" not found
Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/3528f191-ca62-461c-a93f-8fb8758d76af-plugin-serving-cert") pod "nmstate-console-plugin-7fbb5f6569-ht4fl" (UID: "3528f191-ca62-461c-a93f-8fb8758d76af") : secret "plugin-serving-cert" not found Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.125383 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/3528f191-ca62-461c-a93f-8fb8758d76af-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-ht4fl\" (UID: \"3528f191-ca62-461c-a93f-8fb8758d76af\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-ht4fl" Nov 28 13:33:00 crc kubenswrapper[4857]: W1128 13:33:00.138372 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod146db7a4_f901_4160_8053_7fe4f8063e22.slice/crio-fdec209c6d66c6c855d735a8a04eb171dfeac8a62a85f654b1dd38f81e498ee0 WatchSource:0}: Error finding container fdec209c6d66c6c855d735a8a04eb171dfeac8a62a85f654b1dd38f81e498ee0: Status 404 returned error can't find the container with id fdec209c6d66c6c855d735a8a04eb171dfeac8a62a85f654b1dd38f81e498ee0 Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.143265 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c65ch\" (UniqueName: \"kubernetes.io/projected/3528f191-ca62-461c-a93f-8fb8758d76af-kube-api-access-c65ch\") pod \"nmstate-console-plugin-7fbb5f6569-ht4fl\" (UID: \"3528f191-ca62-461c-a93f-8fb8758d76af\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-ht4fl" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.226003 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/48d6810c-358b-4539-8497-65c7a54037af-oauth-serving-cert\") pod \"console-8d58ccdfb-bszcc\" (UID: \"48d6810c-358b-4539-8497-65c7a54037af\") " pod="openshift-console/console-8d58ccdfb-bszcc" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.226462 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6q96\" (UniqueName: \"kubernetes.io/projected/48d6810c-358b-4539-8497-65c7a54037af-kube-api-access-q6q96\") pod \"console-8d58ccdfb-bszcc\" (UID: \"48d6810c-358b-4539-8497-65c7a54037af\") " pod="openshift-console/console-8d58ccdfb-bszcc" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.226700 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/48d6810c-358b-4539-8497-65c7a54037af-trusted-ca-bundle\") pod \"console-8d58ccdfb-bszcc\" (UID: \"48d6810c-358b-4539-8497-65c7a54037af\") " pod="openshift-console/console-8d58ccdfb-bszcc" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.226730 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/48d6810c-358b-4539-8497-65c7a54037af-console-oauth-config\") pod \"console-8d58ccdfb-bszcc\" (UID: \"48d6810c-358b-4539-8497-65c7a54037af\") " pod="openshift-console/console-8d58ccdfb-bszcc" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.226790 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/48d6810c-358b-4539-8497-65c7a54037af-console-serving-cert\") pod \"console-8d58ccdfb-bszcc\" (UID: \"48d6810c-358b-4539-8497-65c7a54037af\") " pod="openshift-console/console-8d58ccdfb-bszcc" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.226816 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/48d6810c-358b-4539-8497-65c7a54037af-service-ca\") pod \"console-8d58ccdfb-bszcc\" (UID: \"48d6810c-358b-4539-8497-65c7a54037af\") " pod="openshift-console/console-8d58ccdfb-bszcc" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.226854 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/48d6810c-358b-4539-8497-65c7a54037af-console-config\") pod \"console-8d58ccdfb-bszcc\" (UID: \"48d6810c-358b-4539-8497-65c7a54037af\") " pod="openshift-console/console-8d58ccdfb-bszcc" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.339525 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q6q96\" (UniqueName: \"kubernetes.io/projected/48d6810c-358b-4539-8497-65c7a54037af-kube-api-access-q6q96\") pod \"console-8d58ccdfb-bszcc\" (UID: \"48d6810c-358b-4539-8497-65c7a54037af\") " pod="openshift-console/console-8d58ccdfb-bszcc" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.339613 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/48d6810c-358b-4539-8497-65c7a54037af-trusted-ca-bundle\") pod \"console-8d58ccdfb-bszcc\" (UID: \"48d6810c-358b-4539-8497-65c7a54037af\") " pod="openshift-console/console-8d58ccdfb-bszcc" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.339637 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/48d6810c-358b-4539-8497-65c7a54037af-console-oauth-config\") pod \"console-8d58ccdfb-bszcc\" (UID: \"48d6810c-358b-4539-8497-65c7a54037af\") " pod="openshift-console/console-8d58ccdfb-bszcc" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.339665 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/48d6810c-358b-4539-8497-65c7a54037af-console-serving-cert\") pod \"console-8d58ccdfb-bszcc\" (UID: \"48d6810c-358b-4539-8497-65c7a54037af\") " pod="openshift-console/console-8d58ccdfb-bszcc" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.339685 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/48d6810c-358b-4539-8497-65c7a54037af-service-ca\") pod \"console-8d58ccdfb-bszcc\" (UID: \"48d6810c-358b-4539-8497-65c7a54037af\") " pod="openshift-console/console-8d58ccdfb-bszcc" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.339709 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/48d6810c-358b-4539-8497-65c7a54037af-console-config\") pod \"console-8d58ccdfb-bszcc\" (UID: \"48d6810c-358b-4539-8497-65c7a54037af\") " pod="openshift-console/console-8d58ccdfb-bszcc" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.339733 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: 
\"kubernetes.io/configmap/48d6810c-358b-4539-8497-65c7a54037af-oauth-serving-cert\") pod \"console-8d58ccdfb-bszcc\" (UID: \"48d6810c-358b-4539-8497-65c7a54037af\") " pod="openshift-console/console-8d58ccdfb-bszcc" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.340812 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/48d6810c-358b-4539-8497-65c7a54037af-service-ca\") pod \"console-8d58ccdfb-bszcc\" (UID: \"48d6810c-358b-4539-8497-65c7a54037af\") " pod="openshift-console/console-8d58ccdfb-bszcc" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.342047 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/48d6810c-358b-4539-8497-65c7a54037af-trusted-ca-bundle\") pod \"console-8d58ccdfb-bszcc\" (UID: \"48d6810c-358b-4539-8497-65c7a54037af\") " pod="openshift-console/console-8d58ccdfb-bszcc" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.342957 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/48d6810c-358b-4539-8497-65c7a54037af-console-config\") pod \"console-8d58ccdfb-bszcc\" (UID: \"48d6810c-358b-4539-8497-65c7a54037af\") " pod="openshift-console/console-8d58ccdfb-bszcc" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.343107 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/48d6810c-358b-4539-8497-65c7a54037af-oauth-serving-cert\") pod \"console-8d58ccdfb-bszcc\" (UID: \"48d6810c-358b-4539-8497-65c7a54037af\") " pod="openshift-console/console-8d58ccdfb-bszcc" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.345037 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/48d6810c-358b-4539-8497-65c7a54037af-console-serving-cert\") pod \"console-8d58ccdfb-bszcc\" (UID: \"48d6810c-358b-4539-8497-65c7a54037af\") " pod="openshift-console/console-8d58ccdfb-bszcc" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.345435 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/48d6810c-358b-4539-8497-65c7a54037af-console-oauth-config\") pod \"console-8d58ccdfb-bszcc\" (UID: \"48d6810c-358b-4539-8497-65c7a54037af\") " pod="openshift-console/console-8d58ccdfb-bszcc" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.354386 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q6q96\" (UniqueName: \"kubernetes.io/projected/48d6810c-358b-4539-8497-65c7a54037af-kube-api-access-q6q96\") pod \"console-8d58ccdfb-bszcc\" (UID: \"48d6810c-358b-4539-8497-65c7a54037af\") " pod="openshift-console/console-8d58ccdfb-bszcc" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.428587 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-8d58ccdfb-bszcc" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.579446 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-qlhzf"] Nov 28 13:33:00 crc kubenswrapper[4857]: W1128 13:33:00.585544 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod07c3f6d4_5b62_4140_a9d8_a5f26841a487.slice/crio-01c4a9521157a11f0fb2d036e402310a26dec8bd134cf92b4f49e544b516e7d0 WatchSource:0}: Error finding container 01c4a9521157a11f0fb2d036e402310a26dec8bd134cf92b4f49e544b516e7d0: Status 404 returned error can't find the container with id 01c4a9521157a11f0fb2d036e402310a26dec8bd134cf92b4f49e544b516e7d0 Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.592842 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xchwf"] Nov 28 13:33:00 crc kubenswrapper[4857]: W1128 13:33:00.598240 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8aad4947_867d_42d1_af5d_d9d8284d21e7.slice/crio-829a889f020abdead2d06a602a4f21ba3325dd16349ab73c4972c721349db9e9 WatchSource:0}: Error finding container 829a889f020abdead2d06a602a4f21ba3325dd16349ab73c4972c721349db9e9: Status 404 returned error can't find the container with id 829a889f020abdead2d06a602a4f21ba3325dd16349ab73c4972c721349db9e9 Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.646046 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/3528f191-ca62-461c-a93f-8fb8758d76af-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-ht4fl\" (UID: \"3528f191-ca62-461c-a93f-8fb8758d76af\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-ht4fl" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.651502 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/3528f191-ca62-461c-a93f-8fb8758d76af-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-ht4fl\" (UID: \"3528f191-ca62-461c-a93f-8fb8758d76af\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-ht4fl" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.734526 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-8d58ccdfb-bszcc"] Nov 28 13:33:00 crc kubenswrapper[4857]: W1128 13:33:00.739992 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod48d6810c_358b_4539_8497_65c7a54037af.slice/crio-27069cd5de1ea4c770fd442188440a1c413ed74bcf7496c562051255523b28d3 WatchSource:0}: Error finding container 27069cd5de1ea4c770fd442188440a1c413ed74bcf7496c562051255523b28d3: Status 404 returned error can't find the container with id 27069cd5de1ea4c770fd442188440a1c413ed74bcf7496c562051255523b28d3 Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.750529 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-8d58ccdfb-bszcc" event={"ID":"48d6810c-358b-4539-8497-65c7a54037af","Type":"ContainerStarted","Data":"27069cd5de1ea4c770fd442188440a1c413ed74bcf7496c562051255523b28d3"} Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.751839 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xchwf" 
event={"ID":"8aad4947-867d-42d1-af5d-d9d8284d21e7","Type":"ContainerStarted","Data":"829a889f020abdead2d06a602a4f21ba3325dd16349ab73c4972c721349db9e9"} Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.752743 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-qlhzf" event={"ID":"07c3f6d4-5b62-4140-a9d8-a5f26841a487","Type":"ContainerStarted","Data":"01c4a9521157a11f0fb2d036e402310a26dec8bd134cf92b4f49e544b516e7d0"} Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.755100 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-njxfb" event={"ID":"146db7a4-f901-4160-8053-7fe4f8063e22","Type":"ContainerStarted","Data":"fdec209c6d66c6c855d735a8a04eb171dfeac8a62a85f654b1dd38f81e498ee0"} Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.755320 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-k2r86" podUID="f529d1db-433d-4505-9436-5f0b88340ec7" containerName="registry-server" containerID="cri-o://ec901324e0323cb29de5bc7e5330a4a51a3849396102c6f4abbbe08fb10c6d18" gracePeriod=2 Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.805068 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-ht4fl" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.978515 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-ht4fl"] Nov 28 13:33:00 crc kubenswrapper[4857]: W1128 13:33:00.983599 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3528f191_ca62_461c_a93f_8fb8758d76af.slice/crio-2894caebc772027cdbb03e09deaba05931c556a06920a28310a10667ae9ae4f1 WatchSource:0}: Error finding container 2894caebc772027cdbb03e09deaba05931c556a06920a28310a10667ae9ae4f1: Status 404 returned error can't find the container with id 2894caebc772027cdbb03e09deaba05931c556a06920a28310a10667ae9ae4f1 Nov 28 13:33:01 crc kubenswrapper[4857]: I1128 13:33:01.773146 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-ht4fl" event={"ID":"3528f191-ca62-461c-a93f-8fb8758d76af","Type":"ContainerStarted","Data":"2894caebc772027cdbb03e09deaba05931c556a06920a28310a10667ae9ae4f1"} Nov 28 13:33:01 crc kubenswrapper[4857]: I1128 13:33:01.775232 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-8d58ccdfb-bszcc" event={"ID":"48d6810c-358b-4539-8497-65c7a54037af","Type":"ContainerStarted","Data":"fe97e6c04cd7c162cf9eb4a5a58ad073ec9264b345ccca39c63f334b2865c407"} Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.728013 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-9lsfz"] Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.730593 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9lsfz" Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.744701 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9lsfz"] Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.799656 4857 generic.go:334] "Generic (PLEG): container finished" podID="f529d1db-433d-4505-9436-5f0b88340ec7" containerID="ec901324e0323cb29de5bc7e5330a4a51a3849396102c6f4abbbe08fb10c6d18" exitCode=0 Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.799793 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k2r86" event={"ID":"f529d1db-433d-4505-9436-5f0b88340ec7","Type":"ContainerDied","Data":"ec901324e0323cb29de5bc7e5330a4a51a3849396102c6f4abbbe08fb10c6d18"} Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.827856 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-8d58ccdfb-bszcc" podStartSLOduration=3.827834724 podStartE2EDuration="3.827834724s" podCreationTimestamp="2025-11-28 13:33:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:33:03.818765968 +0000 UTC m=+875.846141155" watchObservedRunningTime="2025-11-28 13:33:03.827834724 +0000 UTC m=+875.855209891" Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.923762 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41c2321d-3333-4385-8078-11d398c724ff-catalog-content\") pod \"redhat-marketplace-9lsfz\" (UID: \"41c2321d-3333-4385-8078-11d398c724ff\") " pod="openshift-marketplace/redhat-marketplace-9lsfz" Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.923868 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41c2321d-3333-4385-8078-11d398c724ff-utilities\") pod \"redhat-marketplace-9lsfz\" (UID: \"41c2321d-3333-4385-8078-11d398c724ff\") " pod="openshift-marketplace/redhat-marketplace-9lsfz" Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.923917 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gf6qp\" (UniqueName: \"kubernetes.io/projected/41c2321d-3333-4385-8078-11d398c724ff-kube-api-access-gf6qp\") pod \"redhat-marketplace-9lsfz\" (UID: \"41c2321d-3333-4385-8078-11d398c724ff\") " pod="openshift-marketplace/redhat-marketplace-9lsfz" Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.025508 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41c2321d-3333-4385-8078-11d398c724ff-catalog-content\") pod \"redhat-marketplace-9lsfz\" (UID: \"41c2321d-3333-4385-8078-11d398c724ff\") " pod="openshift-marketplace/redhat-marketplace-9lsfz" Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.025651 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41c2321d-3333-4385-8078-11d398c724ff-utilities\") pod \"redhat-marketplace-9lsfz\" (UID: \"41c2321d-3333-4385-8078-11d398c724ff\") " pod="openshift-marketplace/redhat-marketplace-9lsfz" Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.025699 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-gf6qp\" (UniqueName: \"kubernetes.io/projected/41c2321d-3333-4385-8078-11d398c724ff-kube-api-access-gf6qp\") pod \"redhat-marketplace-9lsfz\" (UID: \"41c2321d-3333-4385-8078-11d398c724ff\") " pod="openshift-marketplace/redhat-marketplace-9lsfz" Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.027412 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41c2321d-3333-4385-8078-11d398c724ff-catalog-content\") pod \"redhat-marketplace-9lsfz\" (UID: \"41c2321d-3333-4385-8078-11d398c724ff\") " pod="openshift-marketplace/redhat-marketplace-9lsfz" Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.027689 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41c2321d-3333-4385-8078-11d398c724ff-utilities\") pod \"redhat-marketplace-9lsfz\" (UID: \"41c2321d-3333-4385-8078-11d398c724ff\") " pod="openshift-marketplace/redhat-marketplace-9lsfz" Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.051904 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gf6qp\" (UniqueName: \"kubernetes.io/projected/41c2321d-3333-4385-8078-11d398c724ff-kube-api-access-gf6qp\") pod \"redhat-marketplace-9lsfz\" (UID: \"41c2321d-3333-4385-8078-11d398c724ff\") " pod="openshift-marketplace/redhat-marketplace-9lsfz" Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.056923 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-k2r86" Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.228160 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f529d1db-433d-4505-9436-5f0b88340ec7-catalog-content\") pod \"f529d1db-433d-4505-9436-5f0b88340ec7\" (UID: \"f529d1db-433d-4505-9436-5f0b88340ec7\") " Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.228224 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f529d1db-433d-4505-9436-5f0b88340ec7-utilities\") pod \"f529d1db-433d-4505-9436-5f0b88340ec7\" (UID: \"f529d1db-433d-4505-9436-5f0b88340ec7\") " Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.228259 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2hj8h\" (UniqueName: \"kubernetes.io/projected/f529d1db-433d-4505-9436-5f0b88340ec7-kube-api-access-2hj8h\") pod \"f529d1db-433d-4505-9436-5f0b88340ec7\" (UID: \"f529d1db-433d-4505-9436-5f0b88340ec7\") " Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.230043 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f529d1db-433d-4505-9436-5f0b88340ec7-utilities" (OuterVolumeSpecName: "utilities") pod "f529d1db-433d-4505-9436-5f0b88340ec7" (UID: "f529d1db-433d-4505-9436-5f0b88340ec7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.249164 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f529d1db-433d-4505-9436-5f0b88340ec7-kube-api-access-2hj8h" (OuterVolumeSpecName: "kube-api-access-2hj8h") pod "f529d1db-433d-4505-9436-5f0b88340ec7" (UID: "f529d1db-433d-4505-9436-5f0b88340ec7"). InnerVolumeSpecName "kube-api-access-2hj8h". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.329950 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2hj8h\" (UniqueName: \"kubernetes.io/projected/f529d1db-433d-4505-9436-5f0b88340ec7-kube-api-access-2hj8h\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.329977 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f529d1db-433d-4505-9436-5f0b88340ec7-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.351961 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9lsfz" Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.366965 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f529d1db-433d-4505-9436-5f0b88340ec7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f529d1db-433d-4505-9436-5f0b88340ec7" (UID: "f529d1db-433d-4505-9436-5f0b88340ec7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.431086 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f529d1db-433d-4505-9436-5f0b88340ec7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.806910 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k2r86" event={"ID":"f529d1db-433d-4505-9436-5f0b88340ec7","Type":"ContainerDied","Data":"d100c5b0f6437b250a6c7ef032d7f2c5e008b09e883695598db504ffdf0e99cc"} Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.806971 4857 scope.go:117] "RemoveContainer" containerID="ec901324e0323cb29de5bc7e5330a4a51a3849396102c6f4abbbe08fb10c6d18" Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.807027 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-k2r86" Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.833419 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-k2r86"] Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.836976 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-k2r86"] Nov 28 13:33:06 crc kubenswrapper[4857]: I1128 13:33:06.316959 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f529d1db-433d-4505-9436-5f0b88340ec7" path="/var/lib/kubelet/pods/f529d1db-433d-4505-9436-5f0b88340ec7/volumes" Nov 28 13:33:06 crc kubenswrapper[4857]: I1128 13:33:06.633688 4857 scope.go:117] "RemoveContainer" containerID="54ae539fd241fe10f7a6f886d7ce0d0e00d7f6e32b5853dc19f22bfb7586bccd" Nov 28 13:33:06 crc kubenswrapper[4857]: I1128 13:33:06.661094 4857 scope.go:117] "RemoveContainer" containerID="cc021b8b71750270aa7bc8af104a98ca9d5896f0e5f98099195ca3640e3e6c64" Nov 28 13:33:06 crc kubenswrapper[4857]: I1128 13:33:06.850811 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9lsfz"] Nov 28 13:33:06 crc kubenswrapper[4857]: W1128 13:33:06.857505 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod41c2321d_3333_4385_8078_11d398c724ff.slice/crio-9faffdf6de534fdbf65bcf887b7e35e9cbc4bc0a5e23eff51ca8154028cc0b9e WatchSource:0}: Error finding container 9faffdf6de534fdbf65bcf887b7e35e9cbc4bc0a5e23eff51ca8154028cc0b9e: Status 404 returned error can't find the container with id 9faffdf6de534fdbf65bcf887b7e35e9cbc4bc0a5e23eff51ca8154028cc0b9e Nov 28 13:33:07 crc kubenswrapper[4857]: I1128 13:33:07.828074 4857 generic.go:334] "Generic (PLEG): container finished" podID="41c2321d-3333-4385-8078-11d398c724ff" containerID="b47402e860468eaf7cd31d4cf53b10118ff79b8eaca14eee2908c46652eef169" exitCode=0 Nov 28 13:33:07 crc kubenswrapper[4857]: I1128 13:33:07.828946 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9lsfz" event={"ID":"41c2321d-3333-4385-8078-11d398c724ff","Type":"ContainerDied","Data":"b47402e860468eaf7cd31d4cf53b10118ff79b8eaca14eee2908c46652eef169"} Nov 28 13:33:07 crc kubenswrapper[4857]: I1128 13:33:07.828969 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9lsfz" event={"ID":"41c2321d-3333-4385-8078-11d398c724ff","Type":"ContainerStarted","Data":"9faffdf6de534fdbf65bcf887b7e35e9cbc4bc0a5e23eff51ca8154028cc0b9e"} Nov 28 13:33:07 crc kubenswrapper[4857]: I1128 13:33:07.831127 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-qlhzf" event={"ID":"07c3f6d4-5b62-4140-a9d8-a5f26841a487","Type":"ContainerStarted","Data":"e5527fc0c3461f0f449187dd28721f22fd8cfec5142e185d0fc8d1fbb0902f8f"} Nov 28 13:33:07 crc kubenswrapper[4857]: I1128 13:33:07.833918 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-njxfb" event={"ID":"146db7a4-f901-4160-8053-7fe4f8063e22","Type":"ContainerStarted","Data":"7d942908f4ea665720861c6e02f67de61e76117f53e88886e214af6bda5a6694"} Nov 28 13:33:07 crc kubenswrapper[4857]: I1128 13:33:07.835270 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-njxfb" Nov 28 13:33:07 crc kubenswrapper[4857]: I1128 13:33:07.837557 4857 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xchwf" event={"ID":"8aad4947-867d-42d1-af5d-d9d8284d21e7","Type":"ContainerStarted","Data":"99056ca98d8a33b61d6fe944222334de9d8188a99d7bba5cc415cbb05eb0e47e"} Nov 28 13:33:07 crc kubenswrapper[4857]: I1128 13:33:07.837708 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xchwf" Nov 28 13:33:07 crc kubenswrapper[4857]: I1128 13:33:07.839650 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-ht4fl" event={"ID":"3528f191-ca62-461c-a93f-8fb8758d76af","Type":"ContainerStarted","Data":"e682e786f78425e2c10536ff53f42a2b017c80e234c1d4d89fee9b86237a59f9"} Nov 28 13:33:07 crc kubenswrapper[4857]: I1128 13:33:07.868295 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-njxfb" podStartSLOduration=2.29528322 podStartE2EDuration="8.868274064s" podCreationTimestamp="2025-11-28 13:32:59 +0000 UTC" firstStartedPulling="2025-11-28 13:33:00.140419208 +0000 UTC m=+872.167794375" lastFinishedPulling="2025-11-28 13:33:06.713410012 +0000 UTC m=+878.740785219" observedRunningTime="2025-11-28 13:33:07.868117629 +0000 UTC m=+879.895492826" watchObservedRunningTime="2025-11-28 13:33:07.868274064 +0000 UTC m=+879.895649241" Nov 28 13:33:07 crc kubenswrapper[4857]: I1128 13:33:07.889016 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xchwf" podStartSLOduration=2.791316819 podStartE2EDuration="8.888994168s" podCreationTimestamp="2025-11-28 13:32:59 +0000 UTC" firstStartedPulling="2025-11-28 13:33:00.599985138 +0000 UTC m=+872.627360305" lastFinishedPulling="2025-11-28 13:33:06.697662487 +0000 UTC m=+878.725037654" observedRunningTime="2025-11-28 13:33:07.886997422 +0000 UTC m=+879.914372639" watchObservedRunningTime="2025-11-28 13:33:07.888994168 +0000 UTC m=+879.916369335" Nov 28 13:33:07 crc kubenswrapper[4857]: I1128 13:33:07.913580 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-ht4fl" podStartSLOduration=3.213327199 podStartE2EDuration="8.913553522s" podCreationTimestamp="2025-11-28 13:32:59 +0000 UTC" firstStartedPulling="2025-11-28 13:33:00.985322582 +0000 UTC m=+873.012697749" lastFinishedPulling="2025-11-28 13:33:06.685548865 +0000 UTC m=+878.712924072" observedRunningTime="2025-11-28 13:33:07.909282251 +0000 UTC m=+879.936657438" watchObservedRunningTime="2025-11-28 13:33:07.913553522 +0000 UTC m=+879.940928699" Nov 28 13:33:09 crc kubenswrapper[4857]: I1128 13:33:09.864507 4857 generic.go:334] "Generic (PLEG): container finished" podID="41c2321d-3333-4385-8078-11d398c724ff" containerID="f329cfa003ca2c616925ae264690607a44cd9587e63fe6f4aebaf32623251b65" exitCode=0 Nov 28 13:33:09 crc kubenswrapper[4857]: I1128 13:33:09.864964 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9lsfz" event={"ID":"41c2321d-3333-4385-8078-11d398c724ff","Type":"ContainerDied","Data":"f329cfa003ca2c616925ae264690607a44cd9587e63fe6f4aebaf32623251b65"} Nov 28 13:33:09 crc kubenswrapper[4857]: I1128 13:33:09.871300 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-qlhzf" event={"ID":"07c3f6d4-5b62-4140-a9d8-a5f26841a487","Type":"ContainerStarted","Data":"a9137cda93bb21d1c10421e728282d82b52a8fc4cbd11ef4ed59e14a89821646"} 
Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.429789 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-8d58ccdfb-bszcc"
Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.430454 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-8d58ccdfb-bszcc"
Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.435451 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-8d58ccdfb-bszcc"
Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.469261 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-qlhzf" podStartSLOduration=2.809079531 podStartE2EDuration="11.469231909s" podCreationTimestamp="2025-11-28 13:32:59 +0000 UTC" firstStartedPulling="2025-11-28 13:33:00.587998419 +0000 UTC m=+872.615373586" lastFinishedPulling="2025-11-28 13:33:09.248150747 +0000 UTC m=+881.275525964" observedRunningTime="2025-11-28 13:33:09.916426687 +0000 UTC m=+881.943801844" watchObservedRunningTime="2025-11-28 13:33:10.469231909 +0000 UTC m=+882.496607116"
Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.880976 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9lsfz" event={"ID":"41c2321d-3333-4385-8078-11d398c724ff","Type":"ContainerStarted","Data":"af4ec39cc80f6a6282e79d49a32c9e90b10d7b91015dc63088a795882ddd800e"}
Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.887187 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-8d58ccdfb-bszcc"
Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.904929 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-9lsfz" podStartSLOduration=5.449507468 podStartE2EDuration="7.904903895s" podCreationTimestamp="2025-11-28 13:33:03 +0000 UTC" firstStartedPulling="2025-11-28 13:33:07.830129467 +0000 UTC m=+879.857504634" lastFinishedPulling="2025-11-28 13:33:10.285525894 +0000 UTC m=+882.312901061" observedRunningTime="2025-11-28 13:33:10.898459113 +0000 UTC m=+882.925834290" watchObservedRunningTime="2025-11-28 13:33:10.904903895 +0000 UTC m=+882.932279072"
Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.958889 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-7plbl"]
Nov 28 13:33:14 crc kubenswrapper[4857]: I1128 13:33:14.352307 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-9lsfz"
Nov 28 13:33:14 crc kubenswrapper[4857]: I1128 13:33:14.352807 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-9lsfz"
Nov 28 13:33:14 crc kubenswrapper[4857]: I1128 13:33:14.401444 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-9lsfz"
Nov 28 13:33:15 crc kubenswrapper[4857]: I1128 13:33:15.134557 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-njxfb"
Nov 28 13:33:20 crc kubenswrapper[4857]: I1128 13:33:20.067019 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xchwf"
Nov 28 13:33:24 crc kubenswrapper[4857]: I1128 13:33:24.394066 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-9lsfz"
Nov 28 13:33:24 crc kubenswrapper[4857]: I1128 13:33:24.441401 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9lsfz"]
Nov 28 13:33:24 crc kubenswrapper[4857]: I1128 13:33:24.971808 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-9lsfz" podUID="41c2321d-3333-4385-8078-11d398c724ff" containerName="registry-server" containerID="cri-o://af4ec39cc80f6a6282e79d49a32c9e90b10d7b91015dc63088a795882ddd800e" gracePeriod=2
Nov 28 13:33:25 crc kubenswrapper[4857]: I1128 13:33:25.324937 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9lsfz"
Nov 28 13:33:25 crc kubenswrapper[4857]: I1128 13:33:25.423187 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf6qp\" (UniqueName: \"kubernetes.io/projected/41c2321d-3333-4385-8078-11d398c724ff-kube-api-access-gf6qp\") pod \"41c2321d-3333-4385-8078-11d398c724ff\" (UID: \"41c2321d-3333-4385-8078-11d398c724ff\") "
Nov 28 13:33:25 crc kubenswrapper[4857]: I1128 13:33:25.423246 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41c2321d-3333-4385-8078-11d398c724ff-catalog-content\") pod \"41c2321d-3333-4385-8078-11d398c724ff\" (UID: \"41c2321d-3333-4385-8078-11d398c724ff\") "
Nov 28 13:33:25 crc kubenswrapper[4857]: I1128 13:33:25.431626 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41c2321d-3333-4385-8078-11d398c724ff-kube-api-access-gf6qp" (OuterVolumeSpecName: "kube-api-access-gf6qp") pod "41c2321d-3333-4385-8078-11d398c724ff" (UID: "41c2321d-3333-4385-8078-11d398c724ff"). InnerVolumeSpecName "kube-api-access-gf6qp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:33:25 crc kubenswrapper[4857]: I1128 13:33:25.443720 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41c2321d-3333-4385-8078-11d398c724ff-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "41c2321d-3333-4385-8078-11d398c724ff" (UID: "41c2321d-3333-4385-8078-11d398c724ff"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:33:25 crc kubenswrapper[4857]: I1128 13:33:25.524487 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41c2321d-3333-4385-8078-11d398c724ff-utilities\") pod \"41c2321d-3333-4385-8078-11d398c724ff\" (UID: \"41c2321d-3333-4385-8078-11d398c724ff\") "
Nov 28 13:33:25 crc kubenswrapper[4857]: I1128 13:33:25.524803 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf6qp\" (UniqueName: \"kubernetes.io/projected/41c2321d-3333-4385-8078-11d398c724ff-kube-api-access-gf6qp\") on node \"crc\" DevicePath \"\""
Nov 28 13:33:25 crc kubenswrapper[4857]: I1128 13:33:25.524826 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41c2321d-3333-4385-8078-11d398c724ff-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 13:33:25 crc kubenswrapper[4857]: I1128 13:33:25.525315 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41c2321d-3333-4385-8078-11d398c724ff-utilities" (OuterVolumeSpecName: "utilities") pod "41c2321d-3333-4385-8078-11d398c724ff" (UID: "41c2321d-3333-4385-8078-11d398c724ff"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:33:25 crc kubenswrapper[4857]: I1128 13:33:25.626181 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41c2321d-3333-4385-8078-11d398c724ff-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 13:33:25 crc kubenswrapper[4857]: I1128 13:33:25.979924 4857 generic.go:334] "Generic (PLEG): container finished" podID="41c2321d-3333-4385-8078-11d398c724ff" containerID="af4ec39cc80f6a6282e79d49a32c9e90b10d7b91015dc63088a795882ddd800e" exitCode=0
Nov 28 13:33:25 crc kubenswrapper[4857]: I1128 13:33:25.980296 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9lsfz" event={"ID":"41c2321d-3333-4385-8078-11d398c724ff","Type":"ContainerDied","Data":"af4ec39cc80f6a6282e79d49a32c9e90b10d7b91015dc63088a795882ddd800e"}
Nov 28 13:33:25 crc kubenswrapper[4857]: I1128 13:33:25.980342 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9lsfz" event={"ID":"41c2321d-3333-4385-8078-11d398c724ff","Type":"ContainerDied","Data":"9faffdf6de534fdbf65bcf887b7e35e9cbc4bc0a5e23eff51ca8154028cc0b9e"}
Nov 28 13:33:25 crc kubenswrapper[4857]: I1128 13:33:25.980375 4857 scope.go:117] "RemoveContainer" containerID="af4ec39cc80f6a6282e79d49a32c9e90b10d7b91015dc63088a795882ddd800e"
Nov 28 13:33:25 crc kubenswrapper[4857]: I1128 13:33:25.981022 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9lsfz"
Nov 28 13:33:25 crc kubenswrapper[4857]: I1128 13:33:25.999193 4857 scope.go:117] "RemoveContainer" containerID="f329cfa003ca2c616925ae264690607a44cd9587e63fe6f4aebaf32623251b65"
Nov 28 13:33:26 crc kubenswrapper[4857]: I1128 13:33:26.020064 4857 scope.go:117] "RemoveContainer" containerID="b47402e860468eaf7cd31d4cf53b10118ff79b8eaca14eee2908c46652eef169"
Nov 28 13:33:26 crc kubenswrapper[4857]: I1128 13:33:26.025206 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9lsfz"]
Nov 28 13:33:26 crc kubenswrapper[4857]: I1128 13:33:26.028488 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-9lsfz"]
Nov 28 13:33:26 crc kubenswrapper[4857]: I1128 13:33:26.051619 4857 scope.go:117] "RemoveContainer" containerID="af4ec39cc80f6a6282e79d49a32c9e90b10d7b91015dc63088a795882ddd800e"
Nov 28 13:33:26 crc kubenswrapper[4857]: E1128 13:33:26.052070 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af4ec39cc80f6a6282e79d49a32c9e90b10d7b91015dc63088a795882ddd800e\": container with ID starting with af4ec39cc80f6a6282e79d49a32c9e90b10d7b91015dc63088a795882ddd800e not found: ID does not exist" containerID="af4ec39cc80f6a6282e79d49a32c9e90b10d7b91015dc63088a795882ddd800e"
Nov 28 13:33:26 crc kubenswrapper[4857]: I1128 13:33:26.052126 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af4ec39cc80f6a6282e79d49a32c9e90b10d7b91015dc63088a795882ddd800e"} err="failed to get container status \"af4ec39cc80f6a6282e79d49a32c9e90b10d7b91015dc63088a795882ddd800e\": rpc error: code = NotFound desc = could not find container \"af4ec39cc80f6a6282e79d49a32c9e90b10d7b91015dc63088a795882ddd800e\": container with ID starting with af4ec39cc80f6a6282e79d49a32c9e90b10d7b91015dc63088a795882ddd800e not found: ID does not exist"
Nov 28 13:33:26 crc kubenswrapper[4857]: I1128 13:33:26.052157 4857 scope.go:117] "RemoveContainer" containerID="f329cfa003ca2c616925ae264690607a44cd9587e63fe6f4aebaf32623251b65"
Nov 28 13:33:26 crc kubenswrapper[4857]: E1128 13:33:26.052576 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f329cfa003ca2c616925ae264690607a44cd9587e63fe6f4aebaf32623251b65\": container with ID starting with f329cfa003ca2c616925ae264690607a44cd9587e63fe6f4aebaf32623251b65 not found: ID does not exist" containerID="f329cfa003ca2c616925ae264690607a44cd9587e63fe6f4aebaf32623251b65"
Nov 28 13:33:26 crc kubenswrapper[4857]: I1128 13:33:26.052673 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f329cfa003ca2c616925ae264690607a44cd9587e63fe6f4aebaf32623251b65"} err="failed to get container status \"f329cfa003ca2c616925ae264690607a44cd9587e63fe6f4aebaf32623251b65\": rpc error: code = NotFound desc = could not find container \"f329cfa003ca2c616925ae264690607a44cd9587e63fe6f4aebaf32623251b65\": container with ID starting with f329cfa003ca2c616925ae264690607a44cd9587e63fe6f4aebaf32623251b65 not found: ID does not exist"
Nov 28 13:33:26 crc kubenswrapper[4857]: I1128 13:33:26.052701 4857 scope.go:117] "RemoveContainer" containerID="b47402e860468eaf7cd31d4cf53b10118ff79b8eaca14eee2908c46652eef169"
Nov 28 13:33:26 crc kubenswrapper[4857]: E1128 13:33:26.052964 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b47402e860468eaf7cd31d4cf53b10118ff79b8eaca14eee2908c46652eef169\": container with ID starting with b47402e860468eaf7cd31d4cf53b10118ff79b8eaca14eee2908c46652eef169 not found: ID does not exist" containerID="b47402e860468eaf7cd31d4cf53b10118ff79b8eaca14eee2908c46652eef169"
Nov 28 13:33:26 crc kubenswrapper[4857]: I1128 13:33:26.052991 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b47402e860468eaf7cd31d4cf53b10118ff79b8eaca14eee2908c46652eef169"} err="failed to get container status \"b47402e860468eaf7cd31d4cf53b10118ff79b8eaca14eee2908c46652eef169\": rpc error: code = NotFound desc = could not find container \"b47402e860468eaf7cd31d4cf53b10118ff79b8eaca14eee2908c46652eef169\": container with ID starting with b47402e860468eaf7cd31d4cf53b10118ff79b8eaca14eee2908c46652eef169 not found: ID does not exist"
Nov 28 13:33:26 crc kubenswrapper[4857]: I1128 13:33:26.334204 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41c2321d-3333-4385-8078-11d398c724ff" path="/var/lib/kubelet/pods/41c2321d-3333-4385-8078-11d398c724ff/volumes"
Nov 28 13:33:32 crc kubenswrapper[4857]: I1128 13:33:32.825208 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s"]
Nov 28 13:33:32 crc kubenswrapper[4857]: E1128 13:33:32.825958 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41c2321d-3333-4385-8078-11d398c724ff" containerName="extract-content"
Nov 28 13:33:32 crc kubenswrapper[4857]: I1128 13:33:32.825971 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="41c2321d-3333-4385-8078-11d398c724ff" containerName="extract-content"
Nov 28 13:33:32 crc kubenswrapper[4857]: E1128 13:33:32.825986 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41c2321d-3333-4385-8078-11d398c724ff" containerName="registry-server"
Nov 28 13:33:32 crc kubenswrapper[4857]: I1128 13:33:32.825992 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="41c2321d-3333-4385-8078-11d398c724ff" containerName="registry-server"
Nov 28 13:33:32 crc kubenswrapper[4857]: E1128 13:33:32.825999 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f529d1db-433d-4505-9436-5f0b88340ec7" containerName="extract-utilities"
Nov 28 13:33:32 crc kubenswrapper[4857]: I1128 13:33:32.826005 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f529d1db-433d-4505-9436-5f0b88340ec7" containerName="extract-utilities"
Nov 28 13:33:32 crc kubenswrapper[4857]: E1128 13:33:32.826013 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f529d1db-433d-4505-9436-5f0b88340ec7" containerName="extract-content"
Nov 28 13:33:32 crc kubenswrapper[4857]: I1128 13:33:32.826019 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f529d1db-433d-4505-9436-5f0b88340ec7" containerName="extract-content"
Nov 28 13:33:32 crc kubenswrapper[4857]: E1128 13:33:32.826029 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41c2321d-3333-4385-8078-11d398c724ff" containerName="extract-utilities"
Nov 28 13:33:32 crc kubenswrapper[4857]: I1128 13:33:32.826035 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="41c2321d-3333-4385-8078-11d398c724ff" containerName="extract-utilities"
Nov 28 13:33:32 crc kubenswrapper[4857]: E1128 13:33:32.826046 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f529d1db-433d-4505-9436-5f0b88340ec7" containerName="registry-server"
Nov 28 13:33:32 crc kubenswrapper[4857]: I1128 13:33:32.826051 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f529d1db-433d-4505-9436-5f0b88340ec7" containerName="registry-server"
Nov 28 13:33:32 crc kubenswrapper[4857]: I1128 13:33:32.826161 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f529d1db-433d-4505-9436-5f0b88340ec7" containerName="registry-server"
Nov 28 13:33:32 crc kubenswrapper[4857]: I1128 13:33:32.826177 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="41c2321d-3333-4385-8078-11d398c724ff" containerName="registry-server"
Nov 28 13:33:32 crc kubenswrapper[4857]: I1128 13:33:32.826968 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s"
Nov 28 13:33:32 crc kubenswrapper[4857]: I1128 13:33:32.829027 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Nov 28 13:33:32 crc kubenswrapper[4857]: I1128 13:33:32.850910 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s"]
Nov 28 13:33:33 crc kubenswrapper[4857]: I1128 13:33:33.020889 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92lcb\" (UniqueName: \"kubernetes.io/projected/28fef5c0-5b1c-4bc3-a288-6268042fe12c-kube-api-access-92lcb\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s\" (UID: \"28fef5c0-5b1c-4bc3-a288-6268042fe12c\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s"
Nov 28 13:33:33 crc kubenswrapper[4857]: I1128 13:33:33.020955 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/28fef5c0-5b1c-4bc3-a288-6268042fe12c-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s\" (UID: \"28fef5c0-5b1c-4bc3-a288-6268042fe12c\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s"
Nov 28 13:33:33 crc kubenswrapper[4857]: I1128 13:33:33.020983 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/28fef5c0-5b1c-4bc3-a288-6268042fe12c-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s\" (UID: \"28fef5c0-5b1c-4bc3-a288-6268042fe12c\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s"
Nov 28 13:33:33 crc kubenswrapper[4857]: I1128 13:33:33.122143 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92lcb\" (UniqueName: \"kubernetes.io/projected/28fef5c0-5b1c-4bc3-a288-6268042fe12c-kube-api-access-92lcb\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s\" (UID: \"28fef5c0-5b1c-4bc3-a288-6268042fe12c\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s"
Nov 28 13:33:33 crc kubenswrapper[4857]: I1128 13:33:33.122218 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/28fef5c0-5b1c-4bc3-a288-6268042fe12c-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s\" (UID: \"28fef5c0-5b1c-4bc3-a288-6268042fe12c\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s"
Nov 28 13:33:33 crc kubenswrapper[4857]: I1128 13:33:33.122240 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/28fef5c0-5b1c-4bc3-a288-6268042fe12c-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s\" (UID: \"28fef5c0-5b1c-4bc3-a288-6268042fe12c\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s"
Nov 28 13:33:33 crc kubenswrapper[4857]: I1128 13:33:33.122807 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/28fef5c0-5b1c-4bc3-a288-6268042fe12c-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s\" (UID: \"28fef5c0-5b1c-4bc3-a288-6268042fe12c\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s"
Nov 28 13:33:33 crc kubenswrapper[4857]: I1128 13:33:33.123096 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/28fef5c0-5b1c-4bc3-a288-6268042fe12c-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s\" (UID: \"28fef5c0-5b1c-4bc3-a288-6268042fe12c\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s"
Nov 28 13:33:33 crc kubenswrapper[4857]: I1128 13:33:33.158427 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92lcb\" (UniqueName: \"kubernetes.io/projected/28fef5c0-5b1c-4bc3-a288-6268042fe12c-kube-api-access-92lcb\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s\" (UID: \"28fef5c0-5b1c-4bc3-a288-6268042fe12c\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s"
Nov 28 13:33:33 crc kubenswrapper[4857]: I1128 13:33:33.177573 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 13:33:33 crc kubenswrapper[4857]: I1128 13:33:33.177635 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 13:33:33 crc kubenswrapper[4857]: I1128 13:33:33.444820 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s"
Nov 28 13:33:33 crc kubenswrapper[4857]: I1128 13:33:33.675289 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s"]
Nov 28 13:33:34 crc kubenswrapper[4857]: I1128 13:33:34.023403 4857 generic.go:334] "Generic (PLEG): container finished" podID="28fef5c0-5b1c-4bc3-a288-6268042fe12c" containerID="ac85f274cf27e9832e62b8ebb3f1fb6030e37f7c7546ae4a57d64e5d59145bb3" exitCode=0
Nov 28 13:33:34 crc kubenswrapper[4857]: I1128 13:33:34.023487 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s" event={"ID":"28fef5c0-5b1c-4bc3-a288-6268042fe12c","Type":"ContainerDied","Data":"ac85f274cf27e9832e62b8ebb3f1fb6030e37f7c7546ae4a57d64e5d59145bb3"}
Nov 28 13:33:34 crc kubenswrapper[4857]: I1128 13:33:34.023770 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s" event={"ID":"28fef5c0-5b1c-4bc3-a288-6268042fe12c","Type":"ContainerStarted","Data":"044be666b3e9fbcb37a5a5641fa8c9379d6654207f60f31226875d7da5fb82ff"}
Nov 28 13:33:35 crc kubenswrapper[4857]: I1128 13:33:35.997470 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-7plbl" podUID="fef72e7c-9edd-4a6f-8648-aaaf65497bb6" containerName="console" containerID="cri-o://20c39de4bb4874b18d0b2f8718f9dda04b695996aceb4d1f7ee6081394d7d0fd" gracePeriod=15
Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.039832 4857 generic.go:334] "Generic (PLEG): container finished" podID="28fef5c0-5b1c-4bc3-a288-6268042fe12c" containerID="de01474b7b912c4d3ed3282a52dd333b7693661472910b5316bef6b6bbe176bf" exitCode=0
Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.039909 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s" event={"ID":"28fef5c0-5b1c-4bc3-a288-6268042fe12c","Type":"ContainerDied","Data":"de01474b7b912c4d3ed3282a52dd333b7693661472910b5316bef6b6bbe176bf"}
Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.428087 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-7plbl_fef72e7c-9edd-4a6f-8648-aaaf65497bb6/console/0.log"
Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.428371 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-7plbl"
Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.585190 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-console-config\") pod \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") "
Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.585242 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnt67\" (UniqueName: \"kubernetes.io/projected/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-kube-api-access-mnt67\") pod \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") "
Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.585277 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-console-serving-cert\") pod \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") "
Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.585325 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-oauth-serving-cert\") pod \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") "
Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.585386 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-service-ca\") pod \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") "
Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.585411 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-trusted-ca-bundle\") pod \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") "
Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.585438 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-console-oauth-config\") pod \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\" (UID: \"fef72e7c-9edd-4a6f-8648-aaaf65497bb6\") "
Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.585974 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-console-config" (OuterVolumeSpecName: "console-config") pod "fef72e7c-9edd-4a6f-8648-aaaf65497bb6" (UID: "fef72e7c-9edd-4a6f-8648-aaaf65497bb6"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.586912 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "fef72e7c-9edd-4a6f-8648-aaaf65497bb6" (UID: "fef72e7c-9edd-4a6f-8648-aaaf65497bb6"). InnerVolumeSpecName "oauth-serving-cert".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.586989 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-service-ca" (OuterVolumeSpecName: "service-ca") pod "fef72e7c-9edd-4a6f-8648-aaaf65497bb6" (UID: "fef72e7c-9edd-4a6f-8648-aaaf65497bb6"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.587343 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "fef72e7c-9edd-4a6f-8648-aaaf65497bb6" (UID: "fef72e7c-9edd-4a6f-8648-aaaf65497bb6"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.592371 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "fef72e7c-9edd-4a6f-8648-aaaf65497bb6" (UID: "fef72e7c-9edd-4a6f-8648-aaaf65497bb6"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.592545 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-kube-api-access-mnt67" (OuterVolumeSpecName: "kube-api-access-mnt67") pod "fef72e7c-9edd-4a6f-8648-aaaf65497bb6" (UID: "fef72e7c-9edd-4a6f-8648-aaaf65497bb6"). InnerVolumeSpecName "kube-api-access-mnt67". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.593702 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "fef72e7c-9edd-4a6f-8648-aaaf65497bb6" (UID: "fef72e7c-9edd-4a6f-8648-aaaf65497bb6"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.686934 4857 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-console-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.686965 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnt67\" (UniqueName: \"kubernetes.io/projected/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-kube-api-access-mnt67\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.686979 4857 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.686990 4857 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.687000 4857 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.687011 4857 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:36 crc kubenswrapper[4857]: I1128 13:33:36.687021 4857 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fef72e7c-9edd-4a6f-8648-aaaf65497bb6-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:37 crc kubenswrapper[4857]: I1128 13:33:37.049451 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-7plbl_fef72e7c-9edd-4a6f-8648-aaaf65497bb6/console/0.log" Nov 28 13:33:37 crc kubenswrapper[4857]: I1128 13:33:37.049531 4857 generic.go:334] "Generic (PLEG): container finished" podID="fef72e7c-9edd-4a6f-8648-aaaf65497bb6" containerID="20c39de4bb4874b18d0b2f8718f9dda04b695996aceb4d1f7ee6081394d7d0fd" exitCode=2 Nov 28 13:33:37 crc kubenswrapper[4857]: I1128 13:33:37.049666 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-7plbl" Nov 28 13:33:37 crc kubenswrapper[4857]: I1128 13:33:37.049665 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-7plbl" event={"ID":"fef72e7c-9edd-4a6f-8648-aaaf65497bb6","Type":"ContainerDied","Data":"20c39de4bb4874b18d0b2f8718f9dda04b695996aceb4d1f7ee6081394d7d0fd"} Nov 28 13:33:37 crc kubenswrapper[4857]: I1128 13:33:37.049970 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-7plbl" event={"ID":"fef72e7c-9edd-4a6f-8648-aaaf65497bb6","Type":"ContainerDied","Data":"82ca41a0cede04bc63a401e5567460ab9c79a54d265d0c91acf3776fb9b7e9db"} Nov 28 13:33:37 crc kubenswrapper[4857]: I1128 13:33:37.050072 4857 scope.go:117] "RemoveContainer" containerID="20c39de4bb4874b18d0b2f8718f9dda04b695996aceb4d1f7ee6081394d7d0fd" Nov 28 13:33:37 crc kubenswrapper[4857]: I1128 13:33:37.054311 4857 generic.go:334] "Generic (PLEG): container finished" podID="28fef5c0-5b1c-4bc3-a288-6268042fe12c" containerID="eb637eb87dbcc996ad3a288a74ab9fdf37648227d8606ee3e9e01a3e5325f0c4" exitCode=0 Nov 28 13:33:37 crc kubenswrapper[4857]: I1128 13:33:37.054365 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s" event={"ID":"28fef5c0-5b1c-4bc3-a288-6268042fe12c","Type":"ContainerDied","Data":"eb637eb87dbcc996ad3a288a74ab9fdf37648227d8606ee3e9e01a3e5325f0c4"} Nov 28 13:33:37 crc kubenswrapper[4857]: I1128 13:33:37.086688 4857 scope.go:117] "RemoveContainer" containerID="20c39de4bb4874b18d0b2f8718f9dda04b695996aceb4d1f7ee6081394d7d0fd" Nov 28 13:33:37 crc kubenswrapper[4857]: E1128 13:33:37.087205 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"20c39de4bb4874b18d0b2f8718f9dda04b695996aceb4d1f7ee6081394d7d0fd\": container with ID starting with 20c39de4bb4874b18d0b2f8718f9dda04b695996aceb4d1f7ee6081394d7d0fd not found: ID does not exist" containerID="20c39de4bb4874b18d0b2f8718f9dda04b695996aceb4d1f7ee6081394d7d0fd" Nov 28 13:33:37 crc kubenswrapper[4857]: I1128 13:33:37.087254 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20c39de4bb4874b18d0b2f8718f9dda04b695996aceb4d1f7ee6081394d7d0fd"} err="failed to get container status \"20c39de4bb4874b18d0b2f8718f9dda04b695996aceb4d1f7ee6081394d7d0fd\": rpc error: code = NotFound desc = could not find container \"20c39de4bb4874b18d0b2f8718f9dda04b695996aceb4d1f7ee6081394d7d0fd\": container with ID starting with 20c39de4bb4874b18d0b2f8718f9dda04b695996aceb4d1f7ee6081394d7d0fd not found: ID does not exist" Nov 28 13:33:37 crc kubenswrapper[4857]: I1128 13:33:37.103460 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-7plbl"] Nov 28 13:33:37 crc kubenswrapper[4857]: I1128 13:33:37.107865 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-7plbl"] Nov 28 13:33:38 crc kubenswrapper[4857]: I1128 13:33:38.317904 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fef72e7c-9edd-4a6f-8648-aaaf65497bb6" path="/var/lib/kubelet/pods/fef72e7c-9edd-4a6f-8648-aaaf65497bb6/volumes" Nov 28 13:33:38 crc kubenswrapper[4857]: I1128 13:33:38.344362 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s" Nov 28 13:33:38 crc kubenswrapper[4857]: I1128 13:33:38.414499 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-92lcb\" (UniqueName: \"kubernetes.io/projected/28fef5c0-5b1c-4bc3-a288-6268042fe12c-kube-api-access-92lcb\") pod \"28fef5c0-5b1c-4bc3-a288-6268042fe12c\" (UID: \"28fef5c0-5b1c-4bc3-a288-6268042fe12c\") " Nov 28 13:33:38 crc kubenswrapper[4857]: I1128 13:33:38.414733 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/28fef5c0-5b1c-4bc3-a288-6268042fe12c-bundle\") pod \"28fef5c0-5b1c-4bc3-a288-6268042fe12c\" (UID: \"28fef5c0-5b1c-4bc3-a288-6268042fe12c\") " Nov 28 13:33:38 crc kubenswrapper[4857]: I1128 13:33:38.414807 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/28fef5c0-5b1c-4bc3-a288-6268042fe12c-util\") pod \"28fef5c0-5b1c-4bc3-a288-6268042fe12c\" (UID: \"28fef5c0-5b1c-4bc3-a288-6268042fe12c\") " Nov 28 13:33:38 crc kubenswrapper[4857]: I1128 13:33:38.416837 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28fef5c0-5b1c-4bc3-a288-6268042fe12c-bundle" (OuterVolumeSpecName: "bundle") pod "28fef5c0-5b1c-4bc3-a288-6268042fe12c" (UID: "28fef5c0-5b1c-4bc3-a288-6268042fe12c"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:33:38 crc kubenswrapper[4857]: I1128 13:33:38.421538 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28fef5c0-5b1c-4bc3-a288-6268042fe12c-kube-api-access-92lcb" (OuterVolumeSpecName: "kube-api-access-92lcb") pod "28fef5c0-5b1c-4bc3-a288-6268042fe12c" (UID: "28fef5c0-5b1c-4bc3-a288-6268042fe12c"). InnerVolumeSpecName "kube-api-access-92lcb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:33:38 crc kubenswrapper[4857]: I1128 13:33:38.429353 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28fef5c0-5b1c-4bc3-a288-6268042fe12c-util" (OuterVolumeSpecName: "util") pod "28fef5c0-5b1c-4bc3-a288-6268042fe12c" (UID: "28fef5c0-5b1c-4bc3-a288-6268042fe12c"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:33:38 crc kubenswrapper[4857]: I1128 13:33:38.515994 4857 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/28fef5c0-5b1c-4bc3-a288-6268042fe12c-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:38 crc kubenswrapper[4857]: I1128 13:33:38.516027 4857 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/28fef5c0-5b1c-4bc3-a288-6268042fe12c-util\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:38 crc kubenswrapper[4857]: I1128 13:33:38.516038 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-92lcb\" (UniqueName: \"kubernetes.io/projected/28fef5c0-5b1c-4bc3-a288-6268042fe12c-kube-api-access-92lcb\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:39 crc kubenswrapper[4857]: I1128 13:33:39.072885 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s" event={"ID":"28fef5c0-5b1c-4bc3-a288-6268042fe12c","Type":"ContainerDied","Data":"044be666b3e9fbcb37a5a5641fa8c9379d6654207f60f31226875d7da5fb82ff"} Nov 28 13:33:39 crc kubenswrapper[4857]: I1128 13:33:39.072941 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s" Nov 28 13:33:39 crc kubenswrapper[4857]: I1128 13:33:39.072948 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="044be666b3e9fbcb37a5a5641fa8c9379d6654207f60f31226875d7da5fb82ff" Nov 28 13:33:40 crc kubenswrapper[4857]: I1128 13:33:40.178974 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-8gzrl"] Nov 28 13:33:40 crc kubenswrapper[4857]: E1128 13:33:40.179457 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28fef5c0-5b1c-4bc3-a288-6268042fe12c" containerName="pull" Nov 28 13:33:40 crc kubenswrapper[4857]: I1128 13:33:40.179470 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="28fef5c0-5b1c-4bc3-a288-6268042fe12c" containerName="pull" Nov 28 13:33:40 crc kubenswrapper[4857]: E1128 13:33:40.179482 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fef72e7c-9edd-4a6f-8648-aaaf65497bb6" containerName="console" Nov 28 13:33:40 crc kubenswrapper[4857]: I1128 13:33:40.179488 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fef72e7c-9edd-4a6f-8648-aaaf65497bb6" containerName="console" Nov 28 13:33:40 crc kubenswrapper[4857]: E1128 13:33:40.179495 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28fef5c0-5b1c-4bc3-a288-6268042fe12c" containerName="util" Nov 28 13:33:40 crc kubenswrapper[4857]: I1128 13:33:40.179501 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="28fef5c0-5b1c-4bc3-a288-6268042fe12c" containerName="util" Nov 28 13:33:40 crc kubenswrapper[4857]: E1128 13:33:40.179518 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28fef5c0-5b1c-4bc3-a288-6268042fe12c" containerName="extract" Nov 28 13:33:40 crc kubenswrapper[4857]: I1128 13:33:40.179523 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="28fef5c0-5b1c-4bc3-a288-6268042fe12c" containerName="extract" Nov 28 13:33:40 crc kubenswrapper[4857]: I1128 13:33:40.179607 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="fef72e7c-9edd-4a6f-8648-aaaf65497bb6" containerName="console" Nov 28 13:33:40 crc 
kubenswrapper[4857]: I1128 13:33:40.179623 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="28fef5c0-5b1c-4bc3-a288-6268042fe12c" containerName="extract" Nov 28 13:33:40 crc kubenswrapper[4857]: I1128 13:33:40.180345 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8gzrl" Nov 28 13:33:40 crc kubenswrapper[4857]: I1128 13:33:40.223233 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8gzrl"] Nov 28 13:33:40 crc kubenswrapper[4857]: I1128 13:33:40.234993 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wl7m\" (UniqueName: \"kubernetes.io/projected/9d3f76de-0b2a-4472-b7d8-0d5d42e73873-kube-api-access-6wl7m\") pod \"certified-operators-8gzrl\" (UID: \"9d3f76de-0b2a-4472-b7d8-0d5d42e73873\") " pod="openshift-marketplace/certified-operators-8gzrl" Nov 28 13:33:40 crc kubenswrapper[4857]: I1128 13:33:40.235037 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d3f76de-0b2a-4472-b7d8-0d5d42e73873-catalog-content\") pod \"certified-operators-8gzrl\" (UID: \"9d3f76de-0b2a-4472-b7d8-0d5d42e73873\") " pod="openshift-marketplace/certified-operators-8gzrl" Nov 28 13:33:40 crc kubenswrapper[4857]: I1128 13:33:40.235108 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d3f76de-0b2a-4472-b7d8-0d5d42e73873-utilities\") pod \"certified-operators-8gzrl\" (UID: \"9d3f76de-0b2a-4472-b7d8-0d5d42e73873\") " pod="openshift-marketplace/certified-operators-8gzrl" Nov 28 13:33:40 crc kubenswrapper[4857]: I1128 13:33:40.335817 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wl7m\" (UniqueName: \"kubernetes.io/projected/9d3f76de-0b2a-4472-b7d8-0d5d42e73873-kube-api-access-6wl7m\") pod \"certified-operators-8gzrl\" (UID: \"9d3f76de-0b2a-4472-b7d8-0d5d42e73873\") " pod="openshift-marketplace/certified-operators-8gzrl" Nov 28 13:33:40 crc kubenswrapper[4857]: I1128 13:33:40.335870 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d3f76de-0b2a-4472-b7d8-0d5d42e73873-catalog-content\") pod \"certified-operators-8gzrl\" (UID: \"9d3f76de-0b2a-4472-b7d8-0d5d42e73873\") " pod="openshift-marketplace/certified-operators-8gzrl" Nov 28 13:33:40 crc kubenswrapper[4857]: I1128 13:33:40.335918 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d3f76de-0b2a-4472-b7d8-0d5d42e73873-utilities\") pod \"certified-operators-8gzrl\" (UID: \"9d3f76de-0b2a-4472-b7d8-0d5d42e73873\") " pod="openshift-marketplace/certified-operators-8gzrl" Nov 28 13:33:40 crc kubenswrapper[4857]: I1128 13:33:40.336446 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d3f76de-0b2a-4472-b7d8-0d5d42e73873-utilities\") pod \"certified-operators-8gzrl\" (UID: \"9d3f76de-0b2a-4472-b7d8-0d5d42e73873\") " pod="openshift-marketplace/certified-operators-8gzrl" Nov 28 13:33:40 crc kubenswrapper[4857]: I1128 13:33:40.336516 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/9d3f76de-0b2a-4472-b7d8-0d5d42e73873-catalog-content\") pod \"certified-operators-8gzrl\" (UID: \"9d3f76de-0b2a-4472-b7d8-0d5d42e73873\") " pod="openshift-marketplace/certified-operators-8gzrl" Nov 28 13:33:40 crc kubenswrapper[4857]: I1128 13:33:40.369840 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wl7m\" (UniqueName: \"kubernetes.io/projected/9d3f76de-0b2a-4472-b7d8-0d5d42e73873-kube-api-access-6wl7m\") pod \"certified-operators-8gzrl\" (UID: \"9d3f76de-0b2a-4472-b7d8-0d5d42e73873\") " pod="openshift-marketplace/certified-operators-8gzrl" Nov 28 13:33:40 crc kubenswrapper[4857]: I1128 13:33:40.523201 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8gzrl" Nov 28 13:33:40 crc kubenswrapper[4857]: I1128 13:33:40.779647 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8gzrl"] Nov 28 13:33:40 crc kubenswrapper[4857]: W1128 13:33:40.781144 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d3f76de_0b2a_4472_b7d8_0d5d42e73873.slice/crio-76fc978d7c8b9cef8a42f4b80490f6178ee16c8993d134c644d784711a39883a WatchSource:0}: Error finding container 76fc978d7c8b9cef8a42f4b80490f6178ee16c8993d134c644d784711a39883a: Status 404 returned error can't find the container with id 76fc978d7c8b9cef8a42f4b80490f6178ee16c8993d134c644d784711a39883a Nov 28 13:33:41 crc kubenswrapper[4857]: I1128 13:33:41.084532 4857 generic.go:334] "Generic (PLEG): container finished" podID="9d3f76de-0b2a-4472-b7d8-0d5d42e73873" containerID="34d0e3edebb253f3063d7a32fd2238331437721018b5e0afbfbdced84b623f22" exitCode=0 Nov 28 13:33:41 crc kubenswrapper[4857]: I1128 13:33:41.084620 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8gzrl" event={"ID":"9d3f76de-0b2a-4472-b7d8-0d5d42e73873","Type":"ContainerDied","Data":"34d0e3edebb253f3063d7a32fd2238331437721018b5e0afbfbdced84b623f22"} Nov 28 13:33:41 crc kubenswrapper[4857]: I1128 13:33:41.084875 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8gzrl" event={"ID":"9d3f76de-0b2a-4472-b7d8-0d5d42e73873","Type":"ContainerStarted","Data":"76fc978d7c8b9cef8a42f4b80490f6178ee16c8993d134c644d784711a39883a"} Nov 28 13:33:43 crc kubenswrapper[4857]: I1128 13:33:43.097655 4857 generic.go:334] "Generic (PLEG): container finished" podID="9d3f76de-0b2a-4472-b7d8-0d5d42e73873" containerID="4af1c8f375296c1c0cd80c747b7ded579a7459c100b5f67576493a90dced9f04" exitCode=0 Nov 28 13:33:43 crc kubenswrapper[4857]: I1128 13:33:43.098785 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8gzrl" event={"ID":"9d3f76de-0b2a-4472-b7d8-0d5d42e73873","Type":"ContainerDied","Data":"4af1c8f375296c1c0cd80c747b7ded579a7459c100b5f67576493a90dced9f04"} Nov 28 13:33:44 crc kubenswrapper[4857]: I1128 13:33:44.108886 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8gzrl" event={"ID":"9d3f76de-0b2a-4472-b7d8-0d5d42e73873","Type":"ContainerStarted","Data":"a4c37c876f2ac66c68dac66c1111fa7d099872eec9a6c3f8b6634e475ccb4b59"} Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.652185 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-8gzrl" podStartSLOduration=4.200000055 
podStartE2EDuration="6.652153982s" podCreationTimestamp="2025-11-28 13:33:40 +0000 UTC" firstStartedPulling="2025-11-28 13:33:41.085817907 +0000 UTC m=+913.113193074" lastFinishedPulling="2025-11-28 13:33:43.537971834 +0000 UTC m=+915.565347001" observedRunningTime="2025-11-28 13:33:44.127540061 +0000 UTC m=+916.154915238" watchObservedRunningTime="2025-11-28 13:33:46.652153982 +0000 UTC m=+918.679529189" Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.657890 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-8475cb8447-qrwdp"] Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.659054 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-8475cb8447-qrwdp" Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.661314 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.661425 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.661558 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.666265 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-fvzs2" Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.666626 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.669539 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-8475cb8447-qrwdp"] Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.758551 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4c4fc2c2-42ae-4899-b718-b86cbde512ab-webhook-cert\") pod \"metallb-operator-controller-manager-8475cb8447-qrwdp\" (UID: \"4c4fc2c2-42ae-4899-b718-b86cbde512ab\") " pod="metallb-system/metallb-operator-controller-manager-8475cb8447-qrwdp" Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.758602 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xz9ns\" (UniqueName: \"kubernetes.io/projected/4c4fc2c2-42ae-4899-b718-b86cbde512ab-kube-api-access-xz9ns\") pod \"metallb-operator-controller-manager-8475cb8447-qrwdp\" (UID: \"4c4fc2c2-42ae-4899-b718-b86cbde512ab\") " pod="metallb-system/metallb-operator-controller-manager-8475cb8447-qrwdp" Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.758643 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4c4fc2c2-42ae-4899-b718-b86cbde512ab-apiservice-cert\") pod \"metallb-operator-controller-manager-8475cb8447-qrwdp\" (UID: \"4c4fc2c2-42ae-4899-b718-b86cbde512ab\") " pod="metallb-system/metallb-operator-controller-manager-8475cb8447-qrwdp" Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.860236 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4c4fc2c2-42ae-4899-b718-b86cbde512ab-webhook-cert\") 
pod \"metallb-operator-controller-manager-8475cb8447-qrwdp\" (UID: \"4c4fc2c2-42ae-4899-b718-b86cbde512ab\") " pod="metallb-system/metallb-operator-controller-manager-8475cb8447-qrwdp" Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.860289 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xz9ns\" (UniqueName: \"kubernetes.io/projected/4c4fc2c2-42ae-4899-b718-b86cbde512ab-kube-api-access-xz9ns\") pod \"metallb-operator-controller-manager-8475cb8447-qrwdp\" (UID: \"4c4fc2c2-42ae-4899-b718-b86cbde512ab\") " pod="metallb-system/metallb-operator-controller-manager-8475cb8447-qrwdp" Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.860346 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4c4fc2c2-42ae-4899-b718-b86cbde512ab-apiservice-cert\") pod \"metallb-operator-controller-manager-8475cb8447-qrwdp\" (UID: \"4c4fc2c2-42ae-4899-b718-b86cbde512ab\") " pod="metallb-system/metallb-operator-controller-manager-8475cb8447-qrwdp" Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.870688 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4c4fc2c2-42ae-4899-b718-b86cbde512ab-webhook-cert\") pod \"metallb-operator-controller-manager-8475cb8447-qrwdp\" (UID: \"4c4fc2c2-42ae-4899-b718-b86cbde512ab\") " pod="metallb-system/metallb-operator-controller-manager-8475cb8447-qrwdp" Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.873316 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4c4fc2c2-42ae-4899-b718-b86cbde512ab-apiservice-cert\") pod \"metallb-operator-controller-manager-8475cb8447-qrwdp\" (UID: \"4c4fc2c2-42ae-4899-b718-b86cbde512ab\") " pod="metallb-system/metallb-operator-controller-manager-8475cb8447-qrwdp" Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.882145 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xz9ns\" (UniqueName: \"kubernetes.io/projected/4c4fc2c2-42ae-4899-b718-b86cbde512ab-kube-api-access-xz9ns\") pod \"metallb-operator-controller-manager-8475cb8447-qrwdp\" (UID: \"4c4fc2c2-42ae-4899-b718-b86cbde512ab\") " pod="metallb-system/metallb-operator-controller-manager-8475cb8447-qrwdp" Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.978872 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-8475cb8447-qrwdp" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.000204 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-77fd5fc7fd-q549n"] Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.004887 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-77fd5fc7fd-q549n" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.008671 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-jfq4m" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.008878 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.008981 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.012230 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-77fd5fc7fd-q549n"] Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.066579 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/65b602be-5783-491e-b2e6-f8abd937820e-webhook-cert\") pod \"metallb-operator-webhook-server-77fd5fc7fd-q549n\" (UID: \"65b602be-5783-491e-b2e6-f8abd937820e\") " pod="metallb-system/metallb-operator-webhook-server-77fd5fc7fd-q549n" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.066641 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/65b602be-5783-491e-b2e6-f8abd937820e-apiservice-cert\") pod \"metallb-operator-webhook-server-77fd5fc7fd-q549n\" (UID: \"65b602be-5783-491e-b2e6-f8abd937820e\") " pod="metallb-system/metallb-operator-webhook-server-77fd5fc7fd-q549n" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.066677 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmddk\" (UniqueName: \"kubernetes.io/projected/65b602be-5783-491e-b2e6-f8abd937820e-kube-api-access-hmddk\") pod \"metallb-operator-webhook-server-77fd5fc7fd-q549n\" (UID: \"65b602be-5783-491e-b2e6-f8abd937820e\") " pod="metallb-system/metallb-operator-webhook-server-77fd5fc7fd-q549n" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.167693 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmddk\" (UniqueName: \"kubernetes.io/projected/65b602be-5783-491e-b2e6-f8abd937820e-kube-api-access-hmddk\") pod \"metallb-operator-webhook-server-77fd5fc7fd-q549n\" (UID: \"65b602be-5783-491e-b2e6-f8abd937820e\") " pod="metallb-system/metallb-operator-webhook-server-77fd5fc7fd-q549n" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.167775 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/65b602be-5783-491e-b2e6-f8abd937820e-webhook-cert\") pod \"metallb-operator-webhook-server-77fd5fc7fd-q549n\" (UID: \"65b602be-5783-491e-b2e6-f8abd937820e\") " pod="metallb-system/metallb-operator-webhook-server-77fd5fc7fd-q549n" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.167816 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/65b602be-5783-491e-b2e6-f8abd937820e-apiservice-cert\") pod \"metallb-operator-webhook-server-77fd5fc7fd-q549n\" (UID: \"65b602be-5783-491e-b2e6-f8abd937820e\") " pod="metallb-system/metallb-operator-webhook-server-77fd5fc7fd-q549n" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 
13:33:47.171969 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/65b602be-5783-491e-b2e6-f8abd937820e-apiservice-cert\") pod \"metallb-operator-webhook-server-77fd5fc7fd-q549n\" (UID: \"65b602be-5783-491e-b2e6-f8abd937820e\") " pod="metallb-system/metallb-operator-webhook-server-77fd5fc7fd-q549n" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.172602 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/65b602be-5783-491e-b2e6-f8abd937820e-webhook-cert\") pod \"metallb-operator-webhook-server-77fd5fc7fd-q549n\" (UID: \"65b602be-5783-491e-b2e6-f8abd937820e\") " pod="metallb-system/metallb-operator-webhook-server-77fd5fc7fd-q549n" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.186109 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmddk\" (UniqueName: \"kubernetes.io/projected/65b602be-5783-491e-b2e6-f8abd937820e-kube-api-access-hmddk\") pod \"metallb-operator-webhook-server-77fd5fc7fd-q549n\" (UID: \"65b602be-5783-491e-b2e6-f8abd937820e\") " pod="metallb-system/metallb-operator-webhook-server-77fd5fc7fd-q549n" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.359569 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-77fd5fc7fd-q549n" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.712026 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-8475cb8447-qrwdp"] Nov 28 13:33:48 crc kubenswrapper[4857]: I1128 13:33:48.068406 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-77fd5fc7fd-q549n"] Nov 28 13:33:48 crc kubenswrapper[4857]: W1128 13:33:48.076436 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod65b602be_5783_491e_b2e6_f8abd937820e.slice/crio-4acf4bf1d23cc3c28ed04beee9d10ee6b857c1465762f33e14fef0fa12e8972e WatchSource:0}: Error finding container 4acf4bf1d23cc3c28ed04beee9d10ee6b857c1465762f33e14fef0fa12e8972e: Status 404 returned error can't find the container with id 4acf4bf1d23cc3c28ed04beee9d10ee6b857c1465762f33e14fef0fa12e8972e Nov 28 13:33:48 crc kubenswrapper[4857]: I1128 13:33:48.129698 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-77fd5fc7fd-q549n" event={"ID":"65b602be-5783-491e-b2e6-f8abd937820e","Type":"ContainerStarted","Data":"4acf4bf1d23cc3c28ed04beee9d10ee6b857c1465762f33e14fef0fa12e8972e"} Nov 28 13:33:48 crc kubenswrapper[4857]: I1128 13:33:48.131109 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-8475cb8447-qrwdp" event={"ID":"4c4fc2c2-42ae-4899-b718-b86cbde512ab","Type":"ContainerStarted","Data":"269de0f781a1f5ecf09a94b35b905ef94b42e6dfe14cbfccfe43b5a631f4366d"} Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.523403 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-8gzrl" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.523808 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-8gzrl" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.590507 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openshift-marketplace/certified-operators-8gzrl" Nov 28 13:33:51 crc kubenswrapper[4857]: I1128 13:33:51.214784 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-8gzrl" Nov 28 13:33:51 crc kubenswrapper[4857]: I1128 13:33:51.267656 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8gzrl"] Nov 28 13:33:53 crc kubenswrapper[4857]: I1128 13:33:53.287263 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-8gzrl" podUID="9d3f76de-0b2a-4472-b7d8-0d5d42e73873" containerName="registry-server" containerID="cri-o://a4c37c876f2ac66c68dac66c1111fa7d099872eec9a6c3f8b6634e475ccb4b59" gracePeriod=2 Nov 28 13:33:54 crc kubenswrapper[4857]: I1128 13:33:54.306340 4857 generic.go:334] "Generic (PLEG): container finished" podID="9d3f76de-0b2a-4472-b7d8-0d5d42e73873" containerID="a4c37c876f2ac66c68dac66c1111fa7d099872eec9a6c3f8b6634e475ccb4b59" exitCode=0 Nov 28 13:33:54 crc kubenswrapper[4857]: I1128 13:33:54.306576 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8gzrl" event={"ID":"9d3f76de-0b2a-4472-b7d8-0d5d42e73873","Type":"ContainerDied","Data":"a4c37c876f2ac66c68dac66c1111fa7d099872eec9a6c3f8b6634e475ccb4b59"} Nov 28 13:33:55 crc kubenswrapper[4857]: I1128 13:33:55.968790 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8gzrl" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.048495 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d3f76de-0b2a-4472-b7d8-0d5d42e73873-utilities\") pod \"9d3f76de-0b2a-4472-b7d8-0d5d42e73873\" (UID: \"9d3f76de-0b2a-4472-b7d8-0d5d42e73873\") " Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.048564 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d3f76de-0b2a-4472-b7d8-0d5d42e73873-catalog-content\") pod \"9d3f76de-0b2a-4472-b7d8-0d5d42e73873\" (UID: \"9d3f76de-0b2a-4472-b7d8-0d5d42e73873\") " Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.048601 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6wl7m\" (UniqueName: \"kubernetes.io/projected/9d3f76de-0b2a-4472-b7d8-0d5d42e73873-kube-api-access-6wl7m\") pod \"9d3f76de-0b2a-4472-b7d8-0d5d42e73873\" (UID: \"9d3f76de-0b2a-4472-b7d8-0d5d42e73873\") " Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.049686 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d3f76de-0b2a-4472-b7d8-0d5d42e73873-utilities" (OuterVolumeSpecName: "utilities") pod "9d3f76de-0b2a-4472-b7d8-0d5d42e73873" (UID: "9d3f76de-0b2a-4472-b7d8-0d5d42e73873"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.054538 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d3f76de-0b2a-4472-b7d8-0d5d42e73873-kube-api-access-6wl7m" (OuterVolumeSpecName: "kube-api-access-6wl7m") pod "9d3f76de-0b2a-4472-b7d8-0d5d42e73873" (UID: "9d3f76de-0b2a-4472-b7d8-0d5d42e73873"). InnerVolumeSpecName "kube-api-access-6wl7m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.095980 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d3f76de-0b2a-4472-b7d8-0d5d42e73873-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9d3f76de-0b2a-4472-b7d8-0d5d42e73873" (UID: "9d3f76de-0b2a-4472-b7d8-0d5d42e73873"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.149893 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d3f76de-0b2a-4472-b7d8-0d5d42e73873-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.150103 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d3f76de-0b2a-4472-b7d8-0d5d42e73873-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.150169 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6wl7m\" (UniqueName: \"kubernetes.io/projected/9d3f76de-0b2a-4472-b7d8-0d5d42e73873-kube-api-access-6wl7m\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.321821 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-77fd5fc7fd-q549n" event={"ID":"65b602be-5783-491e-b2e6-f8abd937820e","Type":"ContainerStarted","Data":"9512f5b35c11dcc8dbc965d03f55f17455e1c584b2614167433827cbb7599547"} Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.322013 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-77fd5fc7fd-q549n" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.323184 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-8475cb8447-qrwdp" event={"ID":"4c4fc2c2-42ae-4899-b718-b86cbde512ab","Type":"ContainerStarted","Data":"e6acc4b07c391fbcc6853d326390bec5838a29fa11bcad47cee4c55462045ecd"} Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.323353 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-8475cb8447-qrwdp" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.325820 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8gzrl" event={"ID":"9d3f76de-0b2a-4472-b7d8-0d5d42e73873","Type":"ContainerDied","Data":"76fc978d7c8b9cef8a42f4b80490f6178ee16c8993d134c644d784711a39883a"} Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.325873 4857 scope.go:117] "RemoveContainer" containerID="a4c37c876f2ac66c68dac66c1111fa7d099872eec9a6c3f8b6634e475ccb4b59" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.326010 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8gzrl" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.367130 4857 scope.go:117] "RemoveContainer" containerID="4af1c8f375296c1c0cd80c747b7ded579a7459c100b5f67576493a90dced9f04" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.368955 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-77fd5fc7fd-q549n" podStartSLOduration=2.449675499 podStartE2EDuration="10.368934334s" podCreationTimestamp="2025-11-28 13:33:46 +0000 UTC" firstStartedPulling="2025-11-28 13:33:48.080539935 +0000 UTC m=+920.107915102" lastFinishedPulling="2025-11-28 13:33:55.99979877 +0000 UTC m=+928.027173937" observedRunningTime="2025-11-28 13:33:56.368157272 +0000 UTC m=+928.395532439" watchObservedRunningTime="2025-11-28 13:33:56.368934334 +0000 UTC m=+928.396309511" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.394816 4857 scope.go:117] "RemoveContainer" containerID="34d0e3edebb253f3063d7a32fd2238331437721018b5e0afbfbdced84b623f22" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.396768 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-8475cb8447-qrwdp" podStartSLOduration=2.195858382 podStartE2EDuration="10.396731222s" podCreationTimestamp="2025-11-28 13:33:46 +0000 UTC" firstStartedPulling="2025-11-28 13:33:47.77845711 +0000 UTC m=+919.805832287" lastFinishedPulling="2025-11-28 13:33:55.97932995 +0000 UTC m=+928.006705127" observedRunningTime="2025-11-28 13:33:56.392682577 +0000 UTC m=+928.420057754" watchObservedRunningTime="2025-11-28 13:33:56.396731222 +0000 UTC m=+928.424106399" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.415369 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8gzrl"] Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.417593 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-8gzrl"] Nov 28 13:33:58 crc kubenswrapper[4857]: I1128 13:33:58.316775 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d3f76de-0b2a-4472-b7d8-0d5d42e73873" path="/var/lib/kubelet/pods/9d3f76de-0b2a-4472-b7d8-0d5d42e73873/volumes" Nov 28 13:34:03 crc kubenswrapper[4857]: I1128 13:34:03.178424 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:34:03 crc kubenswrapper[4857]: I1128 13:34:03.179143 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:34:07 crc kubenswrapper[4857]: I1128 13:34:07.367980 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-77fd5fc7fd-q549n" Nov 28 13:34:26 crc kubenswrapper[4857]: I1128 13:34:26.982101 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-8475cb8447-qrwdp" Nov 28 13:34:27 crc kubenswrapper[4857]: I1128 
13:34:27.886651 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-nkjkb"]
Nov 28 13:34:27 crc kubenswrapper[4857]: E1128 13:34:27.886948 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d3f76de-0b2a-4472-b7d8-0d5d42e73873" containerName="extract-utilities"
Nov 28 13:34:27 crc kubenswrapper[4857]: I1128 13:34:27.886967 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d3f76de-0b2a-4472-b7d8-0d5d42e73873" containerName="extract-utilities"
Nov 28 13:34:27 crc kubenswrapper[4857]: E1128 13:34:27.886980 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d3f76de-0b2a-4472-b7d8-0d5d42e73873" containerName="extract-content"
Nov 28 13:34:27 crc kubenswrapper[4857]: I1128 13:34:27.886988 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d3f76de-0b2a-4472-b7d8-0d5d42e73873" containerName="extract-content"
Nov 28 13:34:27 crc kubenswrapper[4857]: E1128 13:34:27.887004 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d3f76de-0b2a-4472-b7d8-0d5d42e73873" containerName="registry-server"
Nov 28 13:34:27 crc kubenswrapper[4857]: I1128 13:34:27.887014 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d3f76de-0b2a-4472-b7d8-0d5d42e73873" containerName="registry-server"
Nov 28 13:34:27 crc kubenswrapper[4857]: I1128 13:34:27.887160 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d3f76de-0b2a-4472-b7d8-0d5d42e73873" containerName="registry-server"
Nov 28 13:34:27 crc kubenswrapper[4857]: I1128 13:34:27.887652 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-nkjkb"
Nov 28 13:34:27 crc kubenswrapper[4857]: I1128 13:34:27.889739 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert"
Nov 28 13:34:27 crc kubenswrapper[4857]: I1128 13:34:27.889842 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-5dwrw"
Nov 28 13:34:27 crc kubenswrapper[4857]: I1128 13:34:27.891598 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-m7fq9"]
Nov 28 13:34:27 crc kubenswrapper[4857]: I1128 13:34:27.895006 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-m7fq9"
Nov 28 13:34:27 crc kubenswrapper[4857]: I1128 13:34:27.896427 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-nkjkb"]
Nov 28 13:34:27 crc kubenswrapper[4857]: I1128 13:34:27.896739 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret"
Nov 28 13:34:27 crc kubenswrapper[4857]: I1128 13:34:27.896817 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.011555 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-xhclh"]
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.012617 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-xhclh"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.016915 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.020458 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.020677 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.021199 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-zxvhh"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.027983 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-f8648f98b-sp9cd"]
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.029037 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-f8648f98b-sp9cd"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.033155 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vn98d\" (UniqueName: \"kubernetes.io/projected/0904931a-1033-4fd7-a34e-6a30ced4ec31-kube-api-access-vn98d\") pod \"frr-k8s-webhook-server-7fcb986d4-nkjkb\" (UID: \"0904931a-1033-4fd7-a34e-6a30ced4ec31\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-nkjkb"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.033196 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/981ad778-03ff-4a46-b35e-f670fe146521-frr-sockets\") pod \"frr-k8s-m7fq9\" (UID: \"981ad778-03ff-4a46-b35e-f670fe146521\") " pod="metallb-system/frr-k8s-m7fq9"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.033233 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/981ad778-03ff-4a46-b35e-f670fe146521-metrics-certs\") pod \"frr-k8s-m7fq9\" (UID: \"981ad778-03ff-4a46-b35e-f670fe146521\") " pod="metallb-system/frr-k8s-m7fq9"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.033253 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0904931a-1033-4fd7-a34e-6a30ced4ec31-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-nkjkb\" (UID: \"0904931a-1033-4fd7-a34e-6a30ced4ec31\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-nkjkb"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.033296 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/981ad778-03ff-4a46-b35e-f670fe146521-metrics\") pod \"frr-k8s-m7fq9\" (UID: \"981ad778-03ff-4a46-b35e-f670fe146521\") " pod="metallb-system/frr-k8s-m7fq9"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.033314 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pktpz\" (UniqueName: \"kubernetes.io/projected/981ad778-03ff-4a46-b35e-f670fe146521-kube-api-access-pktpz\") pod \"frr-k8s-m7fq9\" (UID: \"981ad778-03ff-4a46-b35e-f670fe146521\") " pod="metallb-system/frr-k8s-m7fq9"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.033333 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/981ad778-03ff-4a46-b35e-f670fe146521-frr-conf\") pod \"frr-k8s-m7fq9\" (UID: \"981ad778-03ff-4a46-b35e-f670fe146521\") " pod="metallb-system/frr-k8s-m7fq9"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.033682 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/981ad778-03ff-4a46-b35e-f670fe146521-reloader\") pod \"frr-k8s-m7fq9\" (UID: \"981ad778-03ff-4a46-b35e-f670fe146521\") " pod="metallb-system/frr-k8s-m7fq9"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.033732 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/981ad778-03ff-4a46-b35e-f670fe146521-frr-startup\") pod \"frr-k8s-m7fq9\" (UID: \"981ad778-03ff-4a46-b35e-f670fe146521\") " pod="metallb-system/frr-k8s-m7fq9"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.042374 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.047401 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-sp9cd"]
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.134374 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pktpz\" (UniqueName: \"kubernetes.io/projected/981ad778-03ff-4a46-b35e-f670fe146521-kube-api-access-pktpz\") pod \"frr-k8s-m7fq9\" (UID: \"981ad778-03ff-4a46-b35e-f670fe146521\") " pod="metallb-system/frr-k8s-m7fq9"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.134428 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/981ad778-03ff-4a46-b35e-f670fe146521-frr-conf\") pod \"frr-k8s-m7fq9\" (UID: \"981ad778-03ff-4a46-b35e-f670fe146521\") " pod="metallb-system/frr-k8s-m7fq9"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.134555 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7zcp\" (UniqueName: \"kubernetes.io/projected/fcc84880-b152-4494-b739-40d81a896a41-kube-api-access-p7zcp\") pod \"speaker-xhclh\" (UID: \"fcc84880-b152-4494-b739-40d81a896a41\") " pod="metallb-system/speaker-xhclh"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.134616 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/981ad778-03ff-4a46-b35e-f670fe146521-reloader\") pod \"frr-k8s-m7fq9\" (UID: \"981ad778-03ff-4a46-b35e-f670fe146521\") " pod="metallb-system/frr-k8s-m7fq9"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.134851 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/981ad778-03ff-4a46-b35e-f670fe146521-frr-conf\") pod \"frr-k8s-m7fq9\" (UID: \"981ad778-03ff-4a46-b35e-f670fe146521\") " pod="metallb-system/frr-k8s-m7fq9"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.134887 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8e85b575-3f41-459a-b987-663c3fc8cd4c-metrics-certs\") pod \"controller-f8648f98b-sp9cd\" (UID: \"8e85b575-3f41-459a-b987-663c3fc8cd4c\") " pod="metallb-system/controller-f8648f98b-sp9cd"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.134955 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/981ad778-03ff-4a46-b35e-f670fe146521-frr-startup\") pod \"frr-k8s-m7fq9\" (UID: \"981ad778-03ff-4a46-b35e-f670fe146521\") " pod="metallb-system/frr-k8s-m7fq9"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.135003 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fcc84880-b152-4494-b739-40d81a896a41-metrics-certs\") pod \"speaker-xhclh\" (UID: \"fcc84880-b152-4494-b739-40d81a896a41\") " pod="metallb-system/speaker-xhclh"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.135023 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/fcc84880-b152-4494-b739-40d81a896a41-memberlist\") pod \"speaker-xhclh\" (UID: \"fcc84880-b152-4494-b739-40d81a896a41\") " pod="metallb-system/speaker-xhclh"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.135023 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/981ad778-03ff-4a46-b35e-f670fe146521-reloader\") pod \"frr-k8s-m7fq9\" (UID: \"981ad778-03ff-4a46-b35e-f670fe146521\") " pod="metallb-system/frr-k8s-m7fq9"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.135059 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vn98d\" (UniqueName: \"kubernetes.io/projected/0904931a-1033-4fd7-a34e-6a30ced4ec31-kube-api-access-vn98d\") pod \"frr-k8s-webhook-server-7fcb986d4-nkjkb\" (UID: \"0904931a-1033-4fd7-a34e-6a30ced4ec31\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-nkjkb"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.135090 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/981ad778-03ff-4a46-b35e-f670fe146521-frr-sockets\") pod \"frr-k8s-m7fq9\" (UID: \"981ad778-03ff-4a46-b35e-f670fe146521\") " pod="metallb-system/frr-k8s-m7fq9"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.135129 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8e85b575-3f41-459a-b987-663c3fc8cd4c-cert\") pod \"controller-f8648f98b-sp9cd\" (UID: \"8e85b575-3f41-459a-b987-663c3fc8cd4c\") " pod="metallb-system/controller-f8648f98b-sp9cd"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.135190 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/981ad778-03ff-4a46-b35e-f670fe146521-metrics-certs\") pod \"frr-k8s-m7fq9\" (UID: \"981ad778-03ff-4a46-b35e-f670fe146521\") " pod="metallb-system/frr-k8s-m7fq9"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.135210 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0904931a-1033-4fd7-a34e-6a30ced4ec31-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-nkjkb\" (UID: \"0904931a-1033-4fd7-a34e-6a30ced4ec31\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-nkjkb"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.135246 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krbpw\" (UniqueName: \"kubernetes.io/projected/8e85b575-3f41-459a-b987-663c3fc8cd4c-kube-api-access-krbpw\") pod \"controller-f8648f98b-sp9cd\" (UID: \"8e85b575-3f41-459a-b987-663c3fc8cd4c\") " pod="metallb-system/controller-f8648f98b-sp9cd"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.135309 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/fcc84880-b152-4494-b739-40d81a896a41-metallb-excludel2\") pod \"speaker-xhclh\" (UID: \"fcc84880-b152-4494-b739-40d81a896a41\") " pod="metallb-system/speaker-xhclh"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.135345 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/981ad778-03ff-4a46-b35e-f670fe146521-metrics\") pod \"frr-k8s-m7fq9\" (UID: \"981ad778-03ff-4a46-b35e-f670fe146521\") " pod="metallb-system/frr-k8s-m7fq9"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.135437 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/981ad778-03ff-4a46-b35e-f670fe146521-frr-sockets\") pod \"frr-k8s-m7fq9\" (UID: \"981ad778-03ff-4a46-b35e-f670fe146521\") " pod="metallb-system/frr-k8s-m7fq9"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.135589 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/981ad778-03ff-4a46-b35e-f670fe146521-metrics\") pod \"frr-k8s-m7fq9\" (UID: \"981ad778-03ff-4a46-b35e-f670fe146521\") " pod="metallb-system/frr-k8s-m7fq9"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.135915 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/981ad778-03ff-4a46-b35e-f670fe146521-frr-startup\") pod \"frr-k8s-m7fq9\" (UID: \"981ad778-03ff-4a46-b35e-f670fe146521\") " pod="metallb-system/frr-k8s-m7fq9"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.140402 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/981ad778-03ff-4a46-b35e-f670fe146521-metrics-certs\") pod \"frr-k8s-m7fq9\" (UID: \"981ad778-03ff-4a46-b35e-f670fe146521\") " pod="metallb-system/frr-k8s-m7fq9"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.144374 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0904931a-1033-4fd7-a34e-6a30ced4ec31-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-nkjkb\" (UID: \"0904931a-1033-4fd7-a34e-6a30ced4ec31\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-nkjkb"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.153727 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vn98d\" (UniqueName: \"kubernetes.io/projected/0904931a-1033-4fd7-a34e-6a30ced4ec31-kube-api-access-vn98d\") pod \"frr-k8s-webhook-server-7fcb986d4-nkjkb\" (UID: \"0904931a-1033-4fd7-a34e-6a30ced4ec31\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-nkjkb"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.156413 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pktpz\" (UniqueName: \"kubernetes.io/projected/981ad778-03ff-4a46-b35e-f670fe146521-kube-api-access-pktpz\") pod \"frr-k8s-m7fq9\" (UID: \"981ad778-03ff-4a46-b35e-f670fe146521\") " pod="metallb-system/frr-k8s-m7fq9"
pod \"frr-k8s-m7fq9\" (UID: \"981ad778-03ff-4a46-b35e-f670fe146521\") " pod="metallb-system/frr-k8s-m7fq9" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.211390 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-nkjkb" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.219864 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-m7fq9" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.236831 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7zcp\" (UniqueName: \"kubernetes.io/projected/fcc84880-b152-4494-b739-40d81a896a41-kube-api-access-p7zcp\") pod \"speaker-xhclh\" (UID: \"fcc84880-b152-4494-b739-40d81a896a41\") " pod="metallb-system/speaker-xhclh" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.237092 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8e85b575-3f41-459a-b987-663c3fc8cd4c-metrics-certs\") pod \"controller-f8648f98b-sp9cd\" (UID: \"8e85b575-3f41-459a-b987-663c3fc8cd4c\") " pod="metallb-system/controller-f8648f98b-sp9cd" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.237121 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fcc84880-b152-4494-b739-40d81a896a41-metrics-certs\") pod \"speaker-xhclh\" (UID: \"fcc84880-b152-4494-b739-40d81a896a41\") " pod="metallb-system/speaker-xhclh" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.237140 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/fcc84880-b152-4494-b739-40d81a896a41-memberlist\") pod \"speaker-xhclh\" (UID: \"fcc84880-b152-4494-b739-40d81a896a41\") " pod="metallb-system/speaker-xhclh" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.237161 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8e85b575-3f41-459a-b987-663c3fc8cd4c-cert\") pod \"controller-f8648f98b-sp9cd\" (UID: \"8e85b575-3f41-459a-b987-663c3fc8cd4c\") " pod="metallb-system/controller-f8648f98b-sp9cd" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.237197 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krbpw\" (UniqueName: \"kubernetes.io/projected/8e85b575-3f41-459a-b987-663c3fc8cd4c-kube-api-access-krbpw\") pod \"controller-f8648f98b-sp9cd\" (UID: \"8e85b575-3f41-459a-b987-663c3fc8cd4c\") " pod="metallb-system/controller-f8648f98b-sp9cd" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.237222 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/fcc84880-b152-4494-b739-40d81a896a41-metallb-excludel2\") pod \"speaker-xhclh\" (UID: \"fcc84880-b152-4494-b739-40d81a896a41\") " pod="metallb-system/speaker-xhclh" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.241217 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.241419 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.241588 4857 reflector.go:368] Caches populated for *v1.Secret 
from object-"metallb-system"/"controller-certs-secret" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.241727 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.242156 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 28 13:34:28 crc kubenswrapper[4857]: E1128 13:34:28.249036 4857 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 28 13:34:28 crc kubenswrapper[4857]: E1128 13:34:28.249114 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fcc84880-b152-4494-b739-40d81a896a41-memberlist podName:fcc84880-b152-4494-b739-40d81a896a41 nodeName:}" failed. No retries permitted until 2025-11-28 13:34:28.749091461 +0000 UTC m=+960.776466628 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/fcc84880-b152-4494-b739-40d81a896a41-memberlist") pod "speaker-xhclh" (UID: "fcc84880-b152-4494-b739-40d81a896a41") : secret "metallb-memberlist" not found Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.250870 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/fcc84880-b152-4494-b739-40d81a896a41-metallb-excludel2\") pod \"speaker-xhclh\" (UID: \"fcc84880-b152-4494-b739-40d81a896a41\") " pod="metallb-system/speaker-xhclh" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.252576 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8e85b575-3f41-459a-b987-663c3fc8cd4c-cert\") pod \"controller-f8648f98b-sp9cd\" (UID: \"8e85b575-3f41-459a-b987-663c3fc8cd4c\") " pod="metallb-system/controller-f8648f98b-sp9cd" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.253073 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8e85b575-3f41-459a-b987-663c3fc8cd4c-metrics-certs\") pod \"controller-f8648f98b-sp9cd\" (UID: \"8e85b575-3f41-459a-b987-663c3fc8cd4c\") " pod="metallb-system/controller-f8648f98b-sp9cd" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.253302 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fcc84880-b152-4494-b739-40d81a896a41-metrics-certs\") pod \"speaker-xhclh\" (UID: \"fcc84880-b152-4494-b739-40d81a896a41\") " pod="metallb-system/speaker-xhclh" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.254429 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7zcp\" (UniqueName: \"kubernetes.io/projected/fcc84880-b152-4494-b739-40d81a896a41-kube-api-access-p7zcp\") pod \"speaker-xhclh\" (UID: \"fcc84880-b152-4494-b739-40d81a896a41\") " pod="metallb-system/speaker-xhclh" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.265301 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krbpw\" (UniqueName: \"kubernetes.io/projected/8e85b575-3f41-459a-b987-663c3fc8cd4c-kube-api-access-krbpw\") pod \"controller-f8648f98b-sp9cd\" (UID: \"8e85b575-3f41-459a-b987-663c3fc8cd4c\") " pod="metallb-system/controller-f8648f98b-sp9cd" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.349349 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-f8648f98b-sp9cd" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.527886 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-m7fq9" event={"ID":"981ad778-03ff-4a46-b35e-f670fe146521","Type":"ContainerStarted","Data":"c63dcecca4bcb7745f0636103a34178eb14971f4a59473dcd90d2b06575ae003"} Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.532413 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-sp9cd"] Nov 28 13:34:28 crc kubenswrapper[4857]: W1128 13:34:28.537226 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8e85b575_3f41_459a_b987_663c3fc8cd4c.slice/crio-dd4436c82034cf625f6e299838876313535a62daf06673de961827ae26f622e6 WatchSource:0}: Error finding container dd4436c82034cf625f6e299838876313535a62daf06673de961827ae26f622e6: Status 404 returned error can't find the container with id dd4436c82034cf625f6e299838876313535a62daf06673de961827ae26f622e6 Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.602482 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-nkjkb"] Nov 28 13:34:28 crc kubenswrapper[4857]: W1128 13:34:28.612314 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0904931a_1033_4fd7_a34e_6a30ced4ec31.slice/crio-1f4de9dd2254fcdbfd0d6ca5b002a61d754ce5a9f8b6f1767e44df5ee62bcbec WatchSource:0}: Error finding container 1f4de9dd2254fcdbfd0d6ca5b002a61d754ce5a9f8b6f1767e44df5ee62bcbec: Status 404 returned error can't find the container with id 1f4de9dd2254fcdbfd0d6ca5b002a61d754ce5a9f8b6f1767e44df5ee62bcbec Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.841242 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/fcc84880-b152-4494-b739-40d81a896a41-memberlist\") pod \"speaker-xhclh\" (UID: \"fcc84880-b152-4494-b739-40d81a896a41\") " pod="metallb-system/speaker-xhclh" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.846898 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/fcc84880-b152-4494-b739-40d81a896a41-memberlist\") pod \"speaker-xhclh\" (UID: \"fcc84880-b152-4494-b739-40d81a896a41\") " pod="metallb-system/speaker-xhclh" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.930496 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-zxvhh" Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.939072 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-xhclh" Nov 28 13:34:28 crc kubenswrapper[4857]: W1128 13:34:28.958474 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfcc84880_b152_4494_b739_40d81a896a41.slice/crio-2bf37c1f7d27b040d84344dccce7dc60f8aea80adf93a375d1ab003d8710ecfd WatchSource:0}: Error finding container 2bf37c1f7d27b040d84344dccce7dc60f8aea80adf93a375d1ab003d8710ecfd: Status 404 returned error can't find the container with id 2bf37c1f7d27b040d84344dccce7dc60f8aea80adf93a375d1ab003d8710ecfd Nov 28 13:34:29 crc kubenswrapper[4857]: I1128 13:34:29.534297 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-xhclh" event={"ID":"fcc84880-b152-4494-b739-40d81a896a41","Type":"ContainerStarted","Data":"76563a1610a4f1428a62ec58e56131162e0d80d3479d5bfad408d198658a447b"} Nov 28 13:34:29 crc kubenswrapper[4857]: I1128 13:34:29.534340 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-xhclh" event={"ID":"fcc84880-b152-4494-b739-40d81a896a41","Type":"ContainerStarted","Data":"2bf37c1f7d27b040d84344dccce7dc60f8aea80adf93a375d1ab003d8710ecfd"} Nov 28 13:34:29 crc kubenswrapper[4857]: I1128 13:34:29.535496 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-nkjkb" event={"ID":"0904931a-1033-4fd7-a34e-6a30ced4ec31","Type":"ContainerStarted","Data":"1f4de9dd2254fcdbfd0d6ca5b002a61d754ce5a9f8b6f1767e44df5ee62bcbec"} Nov 28 13:34:29 crc kubenswrapper[4857]: I1128 13:34:29.536867 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-sp9cd" event={"ID":"8e85b575-3f41-459a-b987-663c3fc8cd4c","Type":"ContainerStarted","Data":"6ecba8256de471585c78a167bcd48d4abb37c5722a8e74bf9f33b08adf56f92e"} Nov 28 13:34:29 crc kubenswrapper[4857]: I1128 13:34:29.536895 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-sp9cd" event={"ID":"8e85b575-3f41-459a-b987-663c3fc8cd4c","Type":"ContainerStarted","Data":"1f9b854e08739fbbec0ed9ab21998a44a64abc3bbbc9963cbfcd365b90f0486c"} Nov 28 13:34:29 crc kubenswrapper[4857]: I1128 13:34:29.536908 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-sp9cd" event={"ID":"8e85b575-3f41-459a-b987-663c3fc8cd4c","Type":"ContainerStarted","Data":"dd4436c82034cf625f6e299838876313535a62daf06673de961827ae26f622e6"} Nov 28 13:34:29 crc kubenswrapper[4857]: I1128 13:34:29.537051 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-f8648f98b-sp9cd" Nov 28 13:34:29 crc kubenswrapper[4857]: I1128 13:34:29.556679 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-f8648f98b-sp9cd" podStartSLOduration=2.5566541430000003 podStartE2EDuration="2.556654143s" podCreationTimestamp="2025-11-28 13:34:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:34:29.549556982 +0000 UTC m=+961.576932149" watchObservedRunningTime="2025-11-28 13:34:29.556654143 +0000 UTC m=+961.584029350" Nov 28 13:34:30 crc kubenswrapper[4857]: I1128 13:34:30.545141 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-xhclh" 
event={"ID":"fcc84880-b152-4494-b739-40d81a896a41","Type":"ContainerStarted","Data":"002b7ecce2df4a4b8901698a7fe9fa7a031026c482157a133349ed84461c78ce"} Nov 28 13:34:30 crc kubenswrapper[4857]: I1128 13:34:30.545442 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-xhclh" Nov 28 13:34:30 crc kubenswrapper[4857]: I1128 13:34:30.571425 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-xhclh" podStartSLOduration=3.571401662 podStartE2EDuration="3.571401662s" podCreationTimestamp="2025-11-28 13:34:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:34:30.566826792 +0000 UTC m=+962.594201969" watchObservedRunningTime="2025-11-28 13:34:30.571401662 +0000 UTC m=+962.598776839" Nov 28 13:34:33 crc kubenswrapper[4857]: I1128 13:34:33.178320 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:34:33 crc kubenswrapper[4857]: I1128 13:34:33.178685 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:34:33 crc kubenswrapper[4857]: I1128 13:34:33.179638 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" Nov 28 13:34:33 crc kubenswrapper[4857]: I1128 13:34:33.180645 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2e8dd17747c47de8cbeb5abc3c3cfa11211aa6c3f675d9205fe31b2543798131"} pod="openshift-machine-config-operator/machine-config-daemon-jdgls" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 13:34:33 crc kubenswrapper[4857]: I1128 13:34:33.180699 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" containerID="cri-o://2e8dd17747c47de8cbeb5abc3c3cfa11211aa6c3f675d9205fe31b2543798131" gracePeriod=600 Nov 28 13:34:33 crc kubenswrapper[4857]: I1128 13:34:33.585031 4857 generic.go:334] "Generic (PLEG): container finished" podID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerID="2e8dd17747c47de8cbeb5abc3c3cfa11211aa6c3f675d9205fe31b2543798131" exitCode=0 Nov 28 13:34:33 crc kubenswrapper[4857]: I1128 13:34:33.585063 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" event={"ID":"aba2e99a-c0de-4ae5-b347-de1565fd9d68","Type":"ContainerDied","Data":"2e8dd17747c47de8cbeb5abc3c3cfa11211aa6c3f675d9205fe31b2543798131"} Nov 28 13:34:33 crc kubenswrapper[4857]: I1128 13:34:33.585102 4857 scope.go:117] "RemoveContainer" containerID="a901816410aec0f9ca863ce99f942562aa81364027ff9914d88a03e73e8bd981" Nov 28 13:34:37 crc kubenswrapper[4857]: I1128 13:34:37.642943 4857 generic.go:334] "Generic (PLEG): container finished" 
podID="981ad778-03ff-4a46-b35e-f670fe146521" containerID="e8b04e4d1611e82993973e374a4b9775c75b13de89ef28567171fc208720dcf3" exitCode=0 Nov 28 13:34:37 crc kubenswrapper[4857]: I1128 13:34:37.642997 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-m7fq9" event={"ID":"981ad778-03ff-4a46-b35e-f670fe146521","Type":"ContainerDied","Data":"e8b04e4d1611e82993973e374a4b9775c75b13de89ef28567171fc208720dcf3"} Nov 28 13:34:37 crc kubenswrapper[4857]: I1128 13:34:37.647573 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" event={"ID":"aba2e99a-c0de-4ae5-b347-de1565fd9d68","Type":"ContainerStarted","Data":"c7acb098908896eeec6673568d27f9b2d0362ab62a9a136da040ab452639a28c"} Nov 28 13:34:37 crc kubenswrapper[4857]: I1128 13:34:37.649411 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-nkjkb" event={"ID":"0904931a-1033-4fd7-a34e-6a30ced4ec31","Type":"ContainerStarted","Data":"4b9e60a27ebf64cad394b9943a13f3a9648f975606f0760298b0efa568378d96"} Nov 28 13:34:37 crc kubenswrapper[4857]: I1128 13:34:37.649628 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-nkjkb" Nov 28 13:34:37 crc kubenswrapper[4857]: I1128 13:34:37.697521 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-nkjkb" podStartSLOduration=1.866383844 podStartE2EDuration="10.697501492s" podCreationTimestamp="2025-11-28 13:34:27 +0000 UTC" firstStartedPulling="2025-11-28 13:34:28.618837083 +0000 UTC m=+960.646212250" lastFinishedPulling="2025-11-28 13:34:37.449954731 +0000 UTC m=+969.477329898" observedRunningTime="2025-11-28 13:34:37.692762318 +0000 UTC m=+969.720137485" watchObservedRunningTime="2025-11-28 13:34:37.697501492 +0000 UTC m=+969.724876659" Nov 28 13:34:38 crc kubenswrapper[4857]: I1128 13:34:38.354389 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-f8648f98b-sp9cd" Nov 28 13:34:38 crc kubenswrapper[4857]: I1128 13:34:38.655450 4857 generic.go:334] "Generic (PLEG): container finished" podID="981ad778-03ff-4a46-b35e-f670fe146521" containerID="125e68a41b76c86a7841a53a1ac8276976eb0f1aab01f62001b48153337fc19b" exitCode=0 Nov 28 13:34:38 crc kubenswrapper[4857]: I1128 13:34:38.655535 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-m7fq9" event={"ID":"981ad778-03ff-4a46-b35e-f670fe146521","Type":"ContainerDied","Data":"125e68a41b76c86a7841a53a1ac8276976eb0f1aab01f62001b48153337fc19b"} Nov 28 13:34:39 crc kubenswrapper[4857]: I1128 13:34:39.666329 4857 generic.go:334] "Generic (PLEG): container finished" podID="981ad778-03ff-4a46-b35e-f670fe146521" containerID="18926fc339a2ab7f490d88fc5e08ddf1b87d6f68fd509f1047239c44457b2f12" exitCode=0 Nov 28 13:34:39 crc kubenswrapper[4857]: I1128 13:34:39.666381 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-m7fq9" event={"ID":"981ad778-03ff-4a46-b35e-f670fe146521","Type":"ContainerDied","Data":"18926fc339a2ab7f490d88fc5e08ddf1b87d6f68fd509f1047239c44457b2f12"} Nov 28 13:34:40 crc kubenswrapper[4857]: I1128 13:34:40.678648 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-m7fq9" event={"ID":"981ad778-03ff-4a46-b35e-f670fe146521","Type":"ContainerStarted","Data":"fee2f02e64a920bbcd40230acabb1c0a1d277259679a43a46d431945f5e96529"} Nov 28 
13:34:40 crc kubenswrapper[4857]: I1128 13:34:40.679241 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-m7fq9" event={"ID":"981ad778-03ff-4a46-b35e-f670fe146521","Type":"ContainerStarted","Data":"7079a43ffdc56055b446f4cddf5607adbcdb37c37b2e713e8eac245758d370d3"} Nov 28 13:34:40 crc kubenswrapper[4857]: I1128 13:34:40.679257 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-m7fq9" event={"ID":"981ad778-03ff-4a46-b35e-f670fe146521","Type":"ContainerStarted","Data":"b5c73d9e05e105d699c7ce2ac752ea39fc26c01c58c76d44b98955bb798196f6"} Nov 28 13:34:40 crc kubenswrapper[4857]: I1128 13:34:40.679268 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-m7fq9" event={"ID":"981ad778-03ff-4a46-b35e-f670fe146521","Type":"ContainerStarted","Data":"803fb306e4d12535842657fc1bdacdd8ebd33cfdb872d2decc379815ad48f35c"} Nov 28 13:34:40 crc kubenswrapper[4857]: I1128 13:34:40.679279 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-m7fq9" event={"ID":"981ad778-03ff-4a46-b35e-f670fe146521","Type":"ContainerStarted","Data":"26aa3a5c361983f5255ca6288e25f8f69fc74abb6b4918e8a7ac4cbc505c0d8d"} Nov 28 13:34:41 crc kubenswrapper[4857]: I1128 13:34:41.689569 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-m7fq9" event={"ID":"981ad778-03ff-4a46-b35e-f670fe146521","Type":"ContainerStarted","Data":"a54b9a4a041c97066a99354e5fafe0493e118f27660729113ada424312f8684b"} Nov 28 13:34:41 crc kubenswrapper[4857]: I1128 13:34:41.689910 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-m7fq9" Nov 28 13:34:41 crc kubenswrapper[4857]: I1128 13:34:41.713067 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-m7fq9" podStartSLOduration=5.616832262 podStartE2EDuration="14.713047028s" podCreationTimestamp="2025-11-28 13:34:27 +0000 UTC" firstStartedPulling="2025-11-28 13:34:28.374790131 +0000 UTC m=+960.402165298" lastFinishedPulling="2025-11-28 13:34:37.471004897 +0000 UTC m=+969.498380064" observedRunningTime="2025-11-28 13:34:41.708980613 +0000 UTC m=+973.736355800" watchObservedRunningTime="2025-11-28 13:34:41.713047028 +0000 UTC m=+973.740422195" Nov 28 13:34:43 crc kubenswrapper[4857]: I1128 13:34:43.221088 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-m7fq9" Nov 28 13:34:43 crc kubenswrapper[4857]: I1128 13:34:43.278220 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-m7fq9" Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.216089 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-nkjkb" Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.944131 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-xhclh" Nov 28 13:34:50 crc kubenswrapper[4857]: I1128 13:34:50.657661 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6"] Nov 28 13:34:50 crc kubenswrapper[4857]: I1128 13:34:50.659233 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6" Nov 28 13:34:50 crc kubenswrapper[4857]: I1128 13:34:50.661397 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 28 13:34:50 crc kubenswrapper[4857]: I1128 13:34:50.671640 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6"] Nov 28 13:34:50 crc kubenswrapper[4857]: I1128 13:34:50.849076 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dca407e9-a877-483e-82f3-1c1288b63d52-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6\" (UID: \"dca407e9-a877-483e-82f3-1c1288b63d52\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6" Nov 28 13:34:50 crc kubenswrapper[4857]: I1128 13:34:50.849143 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9222j\" (UniqueName: \"kubernetes.io/projected/dca407e9-a877-483e-82f3-1c1288b63d52-kube-api-access-9222j\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6\" (UID: \"dca407e9-a877-483e-82f3-1c1288b63d52\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6" Nov 28 13:34:50 crc kubenswrapper[4857]: I1128 13:34:50.849384 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dca407e9-a877-483e-82f3-1c1288b63d52-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6\" (UID: \"dca407e9-a877-483e-82f3-1c1288b63d52\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6" Nov 28 13:34:50 crc kubenswrapper[4857]: I1128 13:34:50.950335 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dca407e9-a877-483e-82f3-1c1288b63d52-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6\" (UID: \"dca407e9-a877-483e-82f3-1c1288b63d52\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6" Nov 28 13:34:50 crc kubenswrapper[4857]: I1128 13:34:50.950390 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9222j\" (UniqueName: \"kubernetes.io/projected/dca407e9-a877-483e-82f3-1c1288b63d52-kube-api-access-9222j\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6\" (UID: \"dca407e9-a877-483e-82f3-1c1288b63d52\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6" Nov 28 13:34:50 crc kubenswrapper[4857]: I1128 13:34:50.950444 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dca407e9-a877-483e-82f3-1c1288b63d52-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6\" (UID: \"dca407e9-a877-483e-82f3-1c1288b63d52\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6" Nov 28 13:34:50 crc kubenswrapper[4857]: I1128 13:34:50.951029 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/dca407e9-a877-483e-82f3-1c1288b63d52-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6\" (UID: \"dca407e9-a877-483e-82f3-1c1288b63d52\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6" Nov 28 13:34:50 crc kubenswrapper[4857]: I1128 13:34:50.951046 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dca407e9-a877-483e-82f3-1c1288b63d52-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6\" (UID: \"dca407e9-a877-483e-82f3-1c1288b63d52\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6" Nov 28 13:34:50 crc kubenswrapper[4857]: I1128 13:34:50.973585 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9222j\" (UniqueName: \"kubernetes.io/projected/dca407e9-a877-483e-82f3-1c1288b63d52-kube-api-access-9222j\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6\" (UID: \"dca407e9-a877-483e-82f3-1c1288b63d52\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6" Nov 28 13:34:50 crc kubenswrapper[4857]: I1128 13:34:50.977707 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6" Nov 28 13:34:51 crc kubenswrapper[4857]: I1128 13:34:51.212184 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6"] Nov 28 13:34:51 crc kubenswrapper[4857]: I1128 13:34:51.841231 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6" event={"ID":"dca407e9-a877-483e-82f3-1c1288b63d52","Type":"ContainerStarted","Data":"b1e55d22c29a3bef59c96b90526e44e949f184d8010300a96b2e3f15ac430c43"} Nov 28 13:34:52 crc kubenswrapper[4857]: I1128 13:34:52.849550 4857 generic.go:334] "Generic (PLEG): container finished" podID="dca407e9-a877-483e-82f3-1c1288b63d52" containerID="55d780521cbc1cacbca95e01cf028dbf1b51bdb2cb4b3062db9f32270dd7438a" exitCode=0 Nov 28 13:34:52 crc kubenswrapper[4857]: I1128 13:34:52.849599 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6" event={"ID":"dca407e9-a877-483e-82f3-1c1288b63d52","Type":"ContainerDied","Data":"55d780521cbc1cacbca95e01cf028dbf1b51bdb2cb4b3062db9f32270dd7438a"} Nov 28 13:34:56 crc kubenswrapper[4857]: I1128 13:34:56.877656 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6" event={"ID":"dca407e9-a877-483e-82f3-1c1288b63d52","Type":"ContainerStarted","Data":"1aed3aff95e9f4f76b0e6c766ce659743d96bb11aaede62d7e744e4b62398331"} Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.884398 4857 generic.go:334] "Generic (PLEG): container finished" podID="dca407e9-a877-483e-82f3-1c1288b63d52" containerID="1aed3aff95e9f4f76b0e6c766ce659743d96bb11aaede62d7e744e4b62398331" exitCode=0 Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.884459 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6" 
event={"ID":"dca407e9-a877-483e-82f3-1c1288b63d52","Type":"ContainerDied","Data":"1aed3aff95e9f4f76b0e6c766ce659743d96bb11aaede62d7e744e4b62398331"} Nov 28 13:34:58 crc kubenswrapper[4857]: I1128 13:34:58.226916 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-m7fq9" Nov 28 13:34:59 crc kubenswrapper[4857]: I1128 13:34:59.903243 4857 generic.go:334] "Generic (PLEG): container finished" podID="dca407e9-a877-483e-82f3-1c1288b63d52" containerID="8c56917ed9afc42d319f1df871d4d6a248897be8300ba16c298f5bdb43d1a226" exitCode=0 Nov 28 13:34:59 crc kubenswrapper[4857]: I1128 13:34:59.903373 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6" event={"ID":"dca407e9-a877-483e-82f3-1c1288b63d52","Type":"ContainerDied","Data":"8c56917ed9afc42d319f1df871d4d6a248897be8300ba16c298f5bdb43d1a226"} Nov 28 13:35:01 crc kubenswrapper[4857]: I1128 13:35:01.145227 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6" Nov 28 13:35:01 crc kubenswrapper[4857]: I1128 13:35:01.253002 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9222j\" (UniqueName: \"kubernetes.io/projected/dca407e9-a877-483e-82f3-1c1288b63d52-kube-api-access-9222j\") pod \"dca407e9-a877-483e-82f3-1c1288b63d52\" (UID: \"dca407e9-a877-483e-82f3-1c1288b63d52\") " Nov 28 13:35:01 crc kubenswrapper[4857]: I1128 13:35:01.253054 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dca407e9-a877-483e-82f3-1c1288b63d52-util\") pod \"dca407e9-a877-483e-82f3-1c1288b63d52\" (UID: \"dca407e9-a877-483e-82f3-1c1288b63d52\") " Nov 28 13:35:01 crc kubenswrapper[4857]: I1128 13:35:01.253134 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dca407e9-a877-483e-82f3-1c1288b63d52-bundle\") pod \"dca407e9-a877-483e-82f3-1c1288b63d52\" (UID: \"dca407e9-a877-483e-82f3-1c1288b63d52\") " Nov 28 13:35:01 crc kubenswrapper[4857]: I1128 13:35:01.254663 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dca407e9-a877-483e-82f3-1c1288b63d52-bundle" (OuterVolumeSpecName: "bundle") pod "dca407e9-a877-483e-82f3-1c1288b63d52" (UID: "dca407e9-a877-483e-82f3-1c1288b63d52"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:35:01 crc kubenswrapper[4857]: I1128 13:35:01.259362 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dca407e9-a877-483e-82f3-1c1288b63d52-kube-api-access-9222j" (OuterVolumeSpecName: "kube-api-access-9222j") pod "dca407e9-a877-483e-82f3-1c1288b63d52" (UID: "dca407e9-a877-483e-82f3-1c1288b63d52"). InnerVolumeSpecName "kube-api-access-9222j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:35:01 crc kubenswrapper[4857]: I1128 13:35:01.264041 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dca407e9-a877-483e-82f3-1c1288b63d52-util" (OuterVolumeSpecName: "util") pod "dca407e9-a877-483e-82f3-1c1288b63d52" (UID: "dca407e9-a877-483e-82f3-1c1288b63d52"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:35:01 crc kubenswrapper[4857]: I1128 13:35:01.354691 4857 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dca407e9-a877-483e-82f3-1c1288b63d52-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:35:01 crc kubenswrapper[4857]: I1128 13:35:01.354725 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9222j\" (UniqueName: \"kubernetes.io/projected/dca407e9-a877-483e-82f3-1c1288b63d52-kube-api-access-9222j\") on node \"crc\" DevicePath \"\"" Nov 28 13:35:01 crc kubenswrapper[4857]: I1128 13:35:01.354736 4857 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dca407e9-a877-483e-82f3-1c1288b63d52-util\") on node \"crc\" DevicePath \"\"" Nov 28 13:35:01 crc kubenswrapper[4857]: I1128 13:35:01.926923 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6" Nov 28 13:35:01 crc kubenswrapper[4857]: I1128 13:35:01.926885 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6" event={"ID":"dca407e9-a877-483e-82f3-1c1288b63d52","Type":"ContainerDied","Data":"b1e55d22c29a3bef59c96b90526e44e949f184d8010300a96b2e3f15ac430c43"} Nov 28 13:35:01 crc kubenswrapper[4857]: I1128 13:35:01.927048 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b1e55d22c29a3bef59c96b90526e44e949f184d8010300a96b2e3f15ac430c43" Nov 28 13:35:06 crc kubenswrapper[4857]: I1128 13:35:06.672827 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dphmv"] Nov 28 13:35:06 crc kubenswrapper[4857]: E1128 13:35:06.673627 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dca407e9-a877-483e-82f3-1c1288b63d52" containerName="pull" Nov 28 13:35:06 crc kubenswrapper[4857]: I1128 13:35:06.673643 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="dca407e9-a877-483e-82f3-1c1288b63d52" containerName="pull" Nov 28 13:35:06 crc kubenswrapper[4857]: E1128 13:35:06.673672 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dca407e9-a877-483e-82f3-1c1288b63d52" containerName="util" Nov 28 13:35:06 crc kubenswrapper[4857]: I1128 13:35:06.673679 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="dca407e9-a877-483e-82f3-1c1288b63d52" containerName="util" Nov 28 13:35:06 crc kubenswrapper[4857]: E1128 13:35:06.673692 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dca407e9-a877-483e-82f3-1c1288b63d52" containerName="extract" Nov 28 13:35:06 crc kubenswrapper[4857]: I1128 13:35:06.673700 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="dca407e9-a877-483e-82f3-1c1288b63d52" containerName="extract" Nov 28 13:35:06 crc kubenswrapper[4857]: I1128 13:35:06.673861 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="dca407e9-a877-483e-82f3-1c1288b63d52" containerName="extract" Nov 28 13:35:06 crc kubenswrapper[4857]: I1128 13:35:06.674322 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dphmv" Nov 28 13:35:06 crc kubenswrapper[4857]: I1128 13:35:06.675980 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt" Nov 28 13:35:06 crc kubenswrapper[4857]: I1128 13:35:06.675983 4857 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-kswqs" Nov 28 13:35:06 crc kubenswrapper[4857]: I1128 13:35:06.677057 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt" Nov 28 13:35:06 crc kubenswrapper[4857]: I1128 13:35:06.703476 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dphmv"] Nov 28 13:35:06 crc kubenswrapper[4857]: I1128 13:35:06.849942 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvpnf\" (UniqueName: \"kubernetes.io/projected/6a556b35-82f4-434f-8eaf-d4339fc68f18-kube-api-access-kvpnf\") pod \"cert-manager-operator-controller-manager-64cf6dff88-dphmv\" (UID: \"6a556b35-82f4-434f-8eaf-d4339fc68f18\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dphmv" Nov 28 13:35:06 crc kubenswrapper[4857]: I1128 13:35:06.850018 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/6a556b35-82f4-434f-8eaf-d4339fc68f18-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-dphmv\" (UID: \"6a556b35-82f4-434f-8eaf-d4339fc68f18\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dphmv" Nov 28 13:35:06 crc kubenswrapper[4857]: I1128 13:35:06.951331 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvpnf\" (UniqueName: \"kubernetes.io/projected/6a556b35-82f4-434f-8eaf-d4339fc68f18-kube-api-access-kvpnf\") pod \"cert-manager-operator-controller-manager-64cf6dff88-dphmv\" (UID: \"6a556b35-82f4-434f-8eaf-d4339fc68f18\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dphmv" Nov 28 13:35:06 crc kubenswrapper[4857]: I1128 13:35:06.951412 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/6a556b35-82f4-434f-8eaf-d4339fc68f18-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-dphmv\" (UID: \"6a556b35-82f4-434f-8eaf-d4339fc68f18\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dphmv" Nov 28 13:35:06 crc kubenswrapper[4857]: I1128 13:35:06.951975 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/6a556b35-82f4-434f-8eaf-d4339fc68f18-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-dphmv\" (UID: \"6a556b35-82f4-434f-8eaf-d4339fc68f18\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dphmv" Nov 28 13:35:06 crc kubenswrapper[4857]: I1128 13:35:06.973659 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvpnf\" (UniqueName: \"kubernetes.io/projected/6a556b35-82f4-434f-8eaf-d4339fc68f18-kube-api-access-kvpnf\") pod \"cert-manager-operator-controller-manager-64cf6dff88-dphmv\" (UID: \"6a556b35-82f4-434f-8eaf-d4339fc68f18\") " 
pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dphmv" Nov 28 13:35:06 crc kubenswrapper[4857]: I1128 13:35:06.990395 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dphmv" Nov 28 13:35:07 crc kubenswrapper[4857]: I1128 13:35:07.243734 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dphmv"] Nov 28 13:35:07 crc kubenswrapper[4857]: I1128 13:35:07.962020 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dphmv" event={"ID":"6a556b35-82f4-434f-8eaf-d4339fc68f18","Type":"ContainerStarted","Data":"a7fe5f6e9f720e727068165d93782ac640505c2b2536afda9a8002c24dfd5e3b"} Nov 28 13:35:17 crc kubenswrapper[4857]: I1128 13:35:17.097761 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dphmv" event={"ID":"6a556b35-82f4-434f-8eaf-d4339fc68f18","Type":"ContainerStarted","Data":"b2fbbb8c05871dbef241aa35206eda451cd4f6e2ab07a7f77ad96e9b3f5d1aee"} Nov 28 13:35:17 crc kubenswrapper[4857]: I1128 13:35:17.123426 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-dphmv" podStartSLOduration=2.106842784 podStartE2EDuration="11.12340659s" podCreationTimestamp="2025-11-28 13:35:06 +0000 UTC" firstStartedPulling="2025-11-28 13:35:07.257053374 +0000 UTC m=+999.284428561" lastFinishedPulling="2025-11-28 13:35:16.27361721 +0000 UTC m=+1008.300992367" observedRunningTime="2025-11-28 13:35:17.120829898 +0000 UTC m=+1009.148205105" watchObservedRunningTime="2025-11-28 13:35:17.12340659 +0000 UTC m=+1009.150781767" Nov 28 13:35:21 crc kubenswrapper[4857]: I1128 13:35:21.551714 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-cvgnl"] Nov 28 13:35:21 crc kubenswrapper[4857]: I1128 13:35:21.553072 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-cvgnl" Nov 28 13:35:21 crc kubenswrapper[4857]: I1128 13:35:21.555291 4857 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-rzdnv" Nov 28 13:35:21 crc kubenswrapper[4857]: I1128 13:35:21.555704 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 28 13:35:21 crc kubenswrapper[4857]: I1128 13:35:21.556956 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 28 13:35:21 crc kubenswrapper[4857]: I1128 13:35:21.562179 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-cvgnl"] Nov 28 13:35:21 crc kubenswrapper[4857]: I1128 13:35:21.578069 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/28d034d3-c255-40c7-b019-6b71be38c34f-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-cvgnl\" (UID: \"28d034d3-c255-40c7-b019-6b71be38c34f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-cvgnl" Nov 28 13:35:21 crc kubenswrapper[4857]: I1128 13:35:21.578178 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sb7wn\" (UniqueName: \"kubernetes.io/projected/28d034d3-c255-40c7-b019-6b71be38c34f-kube-api-access-sb7wn\") pod \"cert-manager-cainjector-855d9ccff4-cvgnl\" (UID: \"28d034d3-c255-40c7-b019-6b71be38c34f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-cvgnl" Nov 28 13:35:21 crc kubenswrapper[4857]: I1128 13:35:21.681375 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sb7wn\" (UniqueName: \"kubernetes.io/projected/28d034d3-c255-40c7-b019-6b71be38c34f-kube-api-access-sb7wn\") pod \"cert-manager-cainjector-855d9ccff4-cvgnl\" (UID: \"28d034d3-c255-40c7-b019-6b71be38c34f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-cvgnl" Nov 28 13:35:21 crc kubenswrapper[4857]: I1128 13:35:21.681691 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/28d034d3-c255-40c7-b019-6b71be38c34f-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-cvgnl\" (UID: \"28d034d3-c255-40c7-b019-6b71be38c34f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-cvgnl" Nov 28 13:35:21 crc kubenswrapper[4857]: I1128 13:35:21.698611 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/28d034d3-c255-40c7-b019-6b71be38c34f-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-cvgnl\" (UID: \"28d034d3-c255-40c7-b019-6b71be38c34f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-cvgnl" Nov 28 13:35:21 crc kubenswrapper[4857]: I1128 13:35:21.700705 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sb7wn\" (UniqueName: \"kubernetes.io/projected/28d034d3-c255-40c7-b019-6b71be38c34f-kube-api-access-sb7wn\") pod \"cert-manager-cainjector-855d9ccff4-cvgnl\" (UID: \"28d034d3-c255-40c7-b019-6b71be38c34f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-cvgnl" Nov 28 13:35:21 crc kubenswrapper[4857]: I1128 13:35:21.873120 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-cvgnl" Nov 28 13:35:22 crc kubenswrapper[4857]: I1128 13:35:22.087555 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-cvgnl"] Nov 28 13:35:22 crc kubenswrapper[4857]: I1128 13:35:22.125105 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-cvgnl" event={"ID":"28d034d3-c255-40c7-b019-6b71be38c34f","Type":"ContainerStarted","Data":"34acd729afeb4a63d28ce3e0cc18117bb12f3b121b946b45f9da0cb673ce09fd"} Nov 28 13:35:22 crc kubenswrapper[4857]: I1128 13:35:22.477579 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-7tpch"] Nov 28 13:35:22 crc kubenswrapper[4857]: I1128 13:35:22.478707 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7tpch" Nov 28 13:35:22 crc kubenswrapper[4857]: I1128 13:35:22.490033 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7tpch"] Nov 28 13:35:22 crc kubenswrapper[4857]: I1128 13:35:22.592786 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/743c1056-0b38-4526-ae21-ef17f20596d2-utilities\") pod \"community-operators-7tpch\" (UID: \"743c1056-0b38-4526-ae21-ef17f20596d2\") " pod="openshift-marketplace/community-operators-7tpch" Nov 28 13:35:22 crc kubenswrapper[4857]: I1128 13:35:22.592842 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/743c1056-0b38-4526-ae21-ef17f20596d2-catalog-content\") pod \"community-operators-7tpch\" (UID: \"743c1056-0b38-4526-ae21-ef17f20596d2\") " pod="openshift-marketplace/community-operators-7tpch" Nov 28 13:35:22 crc kubenswrapper[4857]: I1128 13:35:22.592873 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b79cc\" (UniqueName: \"kubernetes.io/projected/743c1056-0b38-4526-ae21-ef17f20596d2-kube-api-access-b79cc\") pod \"community-operators-7tpch\" (UID: \"743c1056-0b38-4526-ae21-ef17f20596d2\") " pod="openshift-marketplace/community-operators-7tpch" Nov 28 13:35:22 crc kubenswrapper[4857]: I1128 13:35:22.694643 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/743c1056-0b38-4526-ae21-ef17f20596d2-utilities\") pod \"community-operators-7tpch\" (UID: \"743c1056-0b38-4526-ae21-ef17f20596d2\") " pod="openshift-marketplace/community-operators-7tpch" Nov 28 13:35:22 crc kubenswrapper[4857]: I1128 13:35:22.694705 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/743c1056-0b38-4526-ae21-ef17f20596d2-catalog-content\") pod \"community-operators-7tpch\" (UID: \"743c1056-0b38-4526-ae21-ef17f20596d2\") " pod="openshift-marketplace/community-operators-7tpch" Nov 28 13:35:22 crc kubenswrapper[4857]: I1128 13:35:22.694735 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b79cc\" (UniqueName: \"kubernetes.io/projected/743c1056-0b38-4526-ae21-ef17f20596d2-kube-api-access-b79cc\") pod \"community-operators-7tpch\" (UID: \"743c1056-0b38-4526-ae21-ef17f20596d2\") " 
pod="openshift-marketplace/community-operators-7tpch" Nov 28 13:35:22 crc kubenswrapper[4857]: I1128 13:35:22.695157 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/743c1056-0b38-4526-ae21-ef17f20596d2-utilities\") pod \"community-operators-7tpch\" (UID: \"743c1056-0b38-4526-ae21-ef17f20596d2\") " pod="openshift-marketplace/community-operators-7tpch" Nov 28 13:35:22 crc kubenswrapper[4857]: I1128 13:35:22.695235 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/743c1056-0b38-4526-ae21-ef17f20596d2-catalog-content\") pod \"community-operators-7tpch\" (UID: \"743c1056-0b38-4526-ae21-ef17f20596d2\") " pod="openshift-marketplace/community-operators-7tpch" Nov 28 13:35:22 crc kubenswrapper[4857]: I1128 13:35:22.716897 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b79cc\" (UniqueName: \"kubernetes.io/projected/743c1056-0b38-4526-ae21-ef17f20596d2-kube-api-access-b79cc\") pod \"community-operators-7tpch\" (UID: \"743c1056-0b38-4526-ae21-ef17f20596d2\") " pod="openshift-marketplace/community-operators-7tpch" Nov 28 13:35:22 crc kubenswrapper[4857]: I1128 13:35:22.809900 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7tpch" Nov 28 13:35:23 crc kubenswrapper[4857]: I1128 13:35:23.049371 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7tpch"] Nov 28 13:35:23 crc kubenswrapper[4857]: I1128 13:35:23.130908 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tpch" event={"ID":"743c1056-0b38-4526-ae21-ef17f20596d2","Type":"ContainerStarted","Data":"555a2035b02572e2ea93f13b8302fc51be01908c34fa25333a9e552c3c8f0b3b"} Nov 28 13:35:24 crc kubenswrapper[4857]: I1128 13:35:24.150434 4857 generic.go:334] "Generic (PLEG): container finished" podID="743c1056-0b38-4526-ae21-ef17f20596d2" containerID="f349bbc65b50908f85ca6d96697079d5ad2e62b1e2706bdaeb29ab4736747e8b" exitCode=0 Nov 28 13:35:24 crc kubenswrapper[4857]: I1128 13:35:24.150493 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tpch" event={"ID":"743c1056-0b38-4526-ae21-ef17f20596d2","Type":"ContainerDied","Data":"f349bbc65b50908f85ca6d96697079d5ad2e62b1e2706bdaeb29ab4736747e8b"} Nov 28 13:35:25 crc kubenswrapper[4857]: I1128 13:35:25.692668 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-rdxp4"] Nov 28 13:35:25 crc kubenswrapper[4857]: I1128 13:35:25.693990 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-rdxp4" Nov 28 13:35:25 crc kubenswrapper[4857]: I1128 13:35:25.698658 4857 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-v9m96" Nov 28 13:35:25 crc kubenswrapper[4857]: I1128 13:35:25.700852 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-rdxp4"] Nov 28 13:35:25 crc kubenswrapper[4857]: I1128 13:35:25.752241 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/95fc4b68-c383-4f0c-929d-0bf89f13183b-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-rdxp4\" (UID: \"95fc4b68-c383-4f0c-929d-0bf89f13183b\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-rdxp4" Nov 28 13:35:25 crc kubenswrapper[4857]: I1128 13:35:25.752371 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r78b4\" (UniqueName: \"kubernetes.io/projected/95fc4b68-c383-4f0c-929d-0bf89f13183b-kube-api-access-r78b4\") pod \"cert-manager-webhook-f4fb5df64-rdxp4\" (UID: \"95fc4b68-c383-4f0c-929d-0bf89f13183b\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-rdxp4" Nov 28 13:35:25 crc kubenswrapper[4857]: I1128 13:35:25.852945 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r78b4\" (UniqueName: \"kubernetes.io/projected/95fc4b68-c383-4f0c-929d-0bf89f13183b-kube-api-access-r78b4\") pod \"cert-manager-webhook-f4fb5df64-rdxp4\" (UID: \"95fc4b68-c383-4f0c-929d-0bf89f13183b\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-rdxp4" Nov 28 13:35:25 crc kubenswrapper[4857]: I1128 13:35:25.853049 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/95fc4b68-c383-4f0c-929d-0bf89f13183b-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-rdxp4\" (UID: \"95fc4b68-c383-4f0c-929d-0bf89f13183b\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-rdxp4" Nov 28 13:35:25 crc kubenswrapper[4857]: I1128 13:35:25.874611 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r78b4\" (UniqueName: \"kubernetes.io/projected/95fc4b68-c383-4f0c-929d-0bf89f13183b-kube-api-access-r78b4\") pod \"cert-manager-webhook-f4fb5df64-rdxp4\" (UID: \"95fc4b68-c383-4f0c-929d-0bf89f13183b\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-rdxp4" Nov 28 13:35:25 crc kubenswrapper[4857]: I1128 13:35:25.875002 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/95fc4b68-c383-4f0c-929d-0bf89f13183b-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-rdxp4\" (UID: \"95fc4b68-c383-4f0c-929d-0bf89f13183b\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-rdxp4" Nov 28 13:35:26 crc kubenswrapper[4857]: I1128 13:35:26.014284 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-rdxp4" Nov 28 13:35:26 crc kubenswrapper[4857]: I1128 13:35:26.162689 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tpch" event={"ID":"743c1056-0b38-4526-ae21-ef17f20596d2","Type":"ContainerStarted","Data":"74839d96da9ba141bc382691db8ef6254df987ecdc86b457d2dbd00e0d2224b0"} Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.172001 4857 generic.go:334] "Generic (PLEG): container finished" podID="743c1056-0b38-4526-ae21-ef17f20596d2" containerID="74839d96da9ba141bc382691db8ef6254df987ecdc86b457d2dbd00e0d2224b0" exitCode=0 Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.172043 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tpch" event={"ID":"743c1056-0b38-4526-ae21-ef17f20596d2","Type":"ContainerDied","Data":"74839d96da9ba141bc382691db8ef6254df987ecdc86b457d2dbd00e0d2224b0"} Nov 28 13:35:37 crc kubenswrapper[4857]: E1128 13:35:37.772946 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cert-manager/jetstack-cert-manager-rhel9@sha256:29a0fa1c2f2a6cee62a0468a3883d16d491b4af29130dad6e3e2bb2948f274df" Nov 28 13:35:37 crc kubenswrapper[4857]: E1128 13:35:37.775088 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cert-manager-cainjector,Image:registry.redhat.io/cert-manager/jetstack-cert-manager-rhel9@sha256:29a0fa1c2f2a6cee62a0468a3883d16d491b4af29130dad6e3e2bb2948f274df,Command:[/app/cmd/cainjector/cainjector],Args:[--leader-election-namespace=kube-system --v=2],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:9402,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:POD_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:bound-sa-token,ReadOnly:true,MountPath:/var/run/secrets/openshift/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sb7wn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000710000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cert-manager-cainjector-855d9ccff4-cvgnl_cert-manager(28d034d3-c255-40c7-b019-6b71be38c34f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 13:35:37 crc kubenswrapper[4857]: E1128 13:35:37.777816 4857 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"cert-manager-cainjector\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="cert-manager/cert-manager-cainjector-855d9ccff4-cvgnl" podUID="28d034d3-c255-40c7-b019-6b71be38c34f" Nov 28 13:35:38 crc kubenswrapper[4857]: E1128 13:35:38.351732 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cert-manager-cainjector\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cert-manager/jetstack-cert-manager-rhel9@sha256:29a0fa1c2f2a6cee62a0468a3883d16d491b4af29130dad6e3e2bb2948f274df\\\"\"" pod="cert-manager/cert-manager-cainjector-855d9ccff4-cvgnl" podUID="28d034d3-c255-40c7-b019-6b71be38c34f" Nov 28 13:35:38 crc kubenswrapper[4857]: I1128 13:35:38.352384 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-rdxp4"] Nov 28 13:35:38 crc kubenswrapper[4857]: I1128 13:35:38.452232 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-86cb77c54b-pp979"] Nov 28 13:35:38 crc kubenswrapper[4857]: I1128 13:35:38.452939 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-pp979" Nov 28 13:35:38 crc kubenswrapper[4857]: I1128 13:35:38.454512 4857 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-2nngf" Nov 28 13:35:38 crc kubenswrapper[4857]: I1128 13:35:38.464166 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-pp979"] Nov 28 13:35:38 crc kubenswrapper[4857]: I1128 13:35:38.507831 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0726d67c-02a4-48e6-b815-9f0d8c567c1a-bound-sa-token\") pod \"cert-manager-86cb77c54b-pp979\" (UID: \"0726d67c-02a4-48e6-b815-9f0d8c567c1a\") " pod="cert-manager/cert-manager-86cb77c54b-pp979" Nov 28 13:35:38 crc kubenswrapper[4857]: I1128 13:35:38.507913 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5pztd\" (UniqueName: \"kubernetes.io/projected/0726d67c-02a4-48e6-b815-9f0d8c567c1a-kube-api-access-5pztd\") pod \"cert-manager-86cb77c54b-pp979\" (UID: \"0726d67c-02a4-48e6-b815-9f0d8c567c1a\") " pod="cert-manager/cert-manager-86cb77c54b-pp979" Nov 28 13:35:38 crc kubenswrapper[4857]: I1128 13:35:38.609241 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0726d67c-02a4-48e6-b815-9f0d8c567c1a-bound-sa-token\") pod \"cert-manager-86cb77c54b-pp979\" (UID: \"0726d67c-02a4-48e6-b815-9f0d8c567c1a\") " pod="cert-manager/cert-manager-86cb77c54b-pp979" Nov 28 13:35:38 crc kubenswrapper[4857]: I1128 13:35:38.609549 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5pztd\" (UniqueName: \"kubernetes.io/projected/0726d67c-02a4-48e6-b815-9f0d8c567c1a-kube-api-access-5pztd\") pod \"cert-manager-86cb77c54b-pp979\" (UID: \"0726d67c-02a4-48e6-b815-9f0d8c567c1a\") " pod="cert-manager/cert-manager-86cb77c54b-pp979" Nov 28 13:35:38 crc kubenswrapper[4857]: I1128 13:35:38.628933 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/0726d67c-02a4-48e6-b815-9f0d8c567c1a-bound-sa-token\") pod \"cert-manager-86cb77c54b-pp979\" (UID: \"0726d67c-02a4-48e6-b815-9f0d8c567c1a\") " pod="cert-manager/cert-manager-86cb77c54b-pp979" Nov 28 13:35:38 crc kubenswrapper[4857]: I1128 13:35:38.630351 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5pztd\" (UniqueName: \"kubernetes.io/projected/0726d67c-02a4-48e6-b815-9f0d8c567c1a-kube-api-access-5pztd\") pod \"cert-manager-86cb77c54b-pp979\" (UID: \"0726d67c-02a4-48e6-b815-9f0d8c567c1a\") " pod="cert-manager/cert-manager-86cb77c54b-pp979" Nov 28 13:35:38 crc kubenswrapper[4857]: I1128 13:35:38.813278 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-pp979" Nov 28 13:35:39 crc kubenswrapper[4857]: I1128 13:35:39.236854 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-pp979"] Nov 28 13:35:39 crc kubenswrapper[4857]: I1128 13:35:39.360876 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-pp979" event={"ID":"0726d67c-02a4-48e6-b815-9f0d8c567c1a","Type":"ContainerStarted","Data":"ef68b1bd66772dfd61ee94635566660c5a4b29c9bd8dd9e2e424c5d7b83f13b0"} Nov 28 13:35:39 crc kubenswrapper[4857]: I1128 13:35:39.360958 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-pp979" event={"ID":"0726d67c-02a4-48e6-b815-9f0d8c567c1a","Type":"ContainerStarted","Data":"10736fc30910cc7386c41771ac988e0039373402c7fdfed752098643104542c4"} Nov 28 13:35:39 crc kubenswrapper[4857]: I1128 13:35:39.363517 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-rdxp4" event={"ID":"95fc4b68-c383-4f0c-929d-0bf89f13183b","Type":"ContainerStarted","Data":"5c70798602ae3a4491c5301c1bb1f8a30c40a0c4cd67b73c62016cfccb8c7340"} Nov 28 13:35:39 crc kubenswrapper[4857]: I1128 13:35:39.363555 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-rdxp4" event={"ID":"95fc4b68-c383-4f0c-929d-0bf89f13183b","Type":"ContainerStarted","Data":"2eeae813d1be8f7301e7c21835a7512b678e846ab0d3985a10b7676ff6e8259d"} Nov 28 13:35:39 crc kubenswrapper[4857]: I1128 13:35:39.363615 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-f4fb5df64-rdxp4" Nov 28 13:35:39 crc kubenswrapper[4857]: I1128 13:35:39.367162 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tpch" event={"ID":"743c1056-0b38-4526-ae21-ef17f20596d2","Type":"ContainerStarted","Data":"ce29458c1d4a64591c67f7a8baa8781a2d91d561a766e349ff6474d2dd93df9b"} Nov 28 13:35:39 crc kubenswrapper[4857]: I1128 13:35:39.388144 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-86cb77c54b-pp979" podStartSLOduration=1.388120037 podStartE2EDuration="1.388120037s" podCreationTimestamp="2025-11-28 13:35:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:35:39.379462705 +0000 UTC m=+1031.406837872" watchObservedRunningTime="2025-11-28 13:35:39.388120037 +0000 UTC m=+1031.415495204" Nov 28 13:35:39 crc kubenswrapper[4857]: I1128 13:35:39.420058 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-f4fb5df64-rdxp4" 
podStartSLOduration=13.580831625 podStartE2EDuration="14.42004174s" podCreationTimestamp="2025-11-28 13:35:25 +0000 UTC" firstStartedPulling="2025-11-28 13:35:38.35899143 +0000 UTC m=+1030.386366597" lastFinishedPulling="2025-11-28 13:35:39.198201545 +0000 UTC m=+1031.225576712" observedRunningTime="2025-11-28 13:35:39.405281857 +0000 UTC m=+1031.432657024" watchObservedRunningTime="2025-11-28 13:35:39.42004174 +0000 UTC m=+1031.447416907" Nov 28 13:35:42 crc kubenswrapper[4857]: I1128 13:35:42.810397 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-7tpch" Nov 28 13:35:42 crc kubenswrapper[4857]: I1128 13:35:42.810825 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-7tpch" Nov 28 13:35:42 crc kubenswrapper[4857]: I1128 13:35:42.853605 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-7tpch" Nov 28 13:35:42 crc kubenswrapper[4857]: I1128 13:35:42.876138 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-7tpch" podStartSLOduration=7.260358109 podStartE2EDuration="20.876114905s" podCreationTimestamp="2025-11-28 13:35:22 +0000 UTC" firstStartedPulling="2025-11-28 13:35:24.152212553 +0000 UTC m=+1016.179587720" lastFinishedPulling="2025-11-28 13:35:37.767969339 +0000 UTC m=+1029.795344516" observedRunningTime="2025-11-28 13:35:39.439611258 +0000 UTC m=+1031.466986425" watchObservedRunningTime="2025-11-28 13:35:42.876114905 +0000 UTC m=+1034.903490092" Nov 28 13:35:43 crc kubenswrapper[4857]: I1128 13:35:43.446690 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-7tpch" Nov 28 13:35:43 crc kubenswrapper[4857]: I1128 13:35:43.490716 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7tpch"] Nov 28 13:35:45 crc kubenswrapper[4857]: I1128 13:35:45.411099 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-7tpch" podUID="743c1056-0b38-4526-ae21-ef17f20596d2" containerName="registry-server" containerID="cri-o://ce29458c1d4a64591c67f7a8baa8781a2d91d561a766e349ff6474d2dd93df9b" gracePeriod=2 Nov 28 13:35:45 crc kubenswrapper[4857]: I1128 13:35:45.774638 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7tpch" Nov 28 13:35:45 crc kubenswrapper[4857]: I1128 13:35:45.916687 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b79cc\" (UniqueName: \"kubernetes.io/projected/743c1056-0b38-4526-ae21-ef17f20596d2-kube-api-access-b79cc\") pod \"743c1056-0b38-4526-ae21-ef17f20596d2\" (UID: \"743c1056-0b38-4526-ae21-ef17f20596d2\") " Nov 28 13:35:45 crc kubenswrapper[4857]: I1128 13:35:45.916825 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/743c1056-0b38-4526-ae21-ef17f20596d2-utilities\") pod \"743c1056-0b38-4526-ae21-ef17f20596d2\" (UID: \"743c1056-0b38-4526-ae21-ef17f20596d2\") " Nov 28 13:35:45 crc kubenswrapper[4857]: I1128 13:35:45.916869 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/743c1056-0b38-4526-ae21-ef17f20596d2-catalog-content\") pod \"743c1056-0b38-4526-ae21-ef17f20596d2\" (UID: \"743c1056-0b38-4526-ae21-ef17f20596d2\") " Nov 28 13:35:45 crc kubenswrapper[4857]: I1128 13:35:45.917828 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/743c1056-0b38-4526-ae21-ef17f20596d2-utilities" (OuterVolumeSpecName: "utilities") pod "743c1056-0b38-4526-ae21-ef17f20596d2" (UID: "743c1056-0b38-4526-ae21-ef17f20596d2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:35:45 crc kubenswrapper[4857]: I1128 13:35:45.922621 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/743c1056-0b38-4526-ae21-ef17f20596d2-kube-api-access-b79cc" (OuterVolumeSpecName: "kube-api-access-b79cc") pod "743c1056-0b38-4526-ae21-ef17f20596d2" (UID: "743c1056-0b38-4526-ae21-ef17f20596d2"). InnerVolumeSpecName "kube-api-access-b79cc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:35:46 crc kubenswrapper[4857]: I1128 13:35:46.012811 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/743c1056-0b38-4526-ae21-ef17f20596d2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "743c1056-0b38-4526-ae21-ef17f20596d2" (UID: "743c1056-0b38-4526-ae21-ef17f20596d2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:35:46 crc kubenswrapper[4857]: I1128 13:35:46.018168 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b79cc\" (UniqueName: \"kubernetes.io/projected/743c1056-0b38-4526-ae21-ef17f20596d2-kube-api-access-b79cc\") on node \"crc\" DevicePath \"\"" Nov 28 13:35:46 crc kubenswrapper[4857]: I1128 13:35:46.018217 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/743c1056-0b38-4526-ae21-ef17f20596d2-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:35:46 crc kubenswrapper[4857]: I1128 13:35:46.018246 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/743c1056-0b38-4526-ae21-ef17f20596d2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:35:46 crc kubenswrapper[4857]: I1128 13:35:46.018293 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-f4fb5df64-rdxp4" Nov 28 13:35:46 crc kubenswrapper[4857]: I1128 13:35:46.421905 4857 generic.go:334] "Generic (PLEG): container finished" podID="743c1056-0b38-4526-ae21-ef17f20596d2" containerID="ce29458c1d4a64591c67f7a8baa8781a2d91d561a766e349ff6474d2dd93df9b" exitCode=0 Nov 28 13:35:46 crc kubenswrapper[4857]: I1128 13:35:46.421959 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tpch" event={"ID":"743c1056-0b38-4526-ae21-ef17f20596d2","Type":"ContainerDied","Data":"ce29458c1d4a64591c67f7a8baa8781a2d91d561a766e349ff6474d2dd93df9b"} Nov 28 13:35:46 crc kubenswrapper[4857]: I1128 13:35:46.422002 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tpch" event={"ID":"743c1056-0b38-4526-ae21-ef17f20596d2","Type":"ContainerDied","Data":"555a2035b02572e2ea93f13b8302fc51be01908c34fa25333a9e552c3c8f0b3b"} Nov 28 13:35:46 crc kubenswrapper[4857]: I1128 13:35:46.422042 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7tpch" Nov 28 13:35:46 crc kubenswrapper[4857]: I1128 13:35:46.422053 4857 scope.go:117] "RemoveContainer" containerID="ce29458c1d4a64591c67f7a8baa8781a2d91d561a766e349ff6474d2dd93df9b" Nov 28 13:35:46 crc kubenswrapper[4857]: I1128 13:35:46.443962 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7tpch"] Nov 28 13:35:46 crc kubenswrapper[4857]: I1128 13:35:46.448803 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-7tpch"] Nov 28 13:35:46 crc kubenswrapper[4857]: I1128 13:35:46.452176 4857 scope.go:117] "RemoveContainer" containerID="74839d96da9ba141bc382691db8ef6254df987ecdc86b457d2dbd00e0d2224b0" Nov 28 13:35:46 crc kubenswrapper[4857]: I1128 13:35:46.484145 4857 scope.go:117] "RemoveContainer" containerID="f349bbc65b50908f85ca6d96697079d5ad2e62b1e2706bdaeb29ab4736747e8b" Nov 28 13:35:46 crc kubenswrapper[4857]: I1128 13:35:46.504669 4857 scope.go:117] "RemoveContainer" containerID="ce29458c1d4a64591c67f7a8baa8781a2d91d561a766e349ff6474d2dd93df9b" Nov 28 13:35:46 crc kubenswrapper[4857]: E1128 13:35:46.505151 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce29458c1d4a64591c67f7a8baa8781a2d91d561a766e349ff6474d2dd93df9b\": container with ID starting with ce29458c1d4a64591c67f7a8baa8781a2d91d561a766e349ff6474d2dd93df9b not found: ID does not exist" containerID="ce29458c1d4a64591c67f7a8baa8781a2d91d561a766e349ff6474d2dd93df9b" Nov 28 13:35:46 crc kubenswrapper[4857]: I1128 13:35:46.505199 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce29458c1d4a64591c67f7a8baa8781a2d91d561a766e349ff6474d2dd93df9b"} err="failed to get container status \"ce29458c1d4a64591c67f7a8baa8781a2d91d561a766e349ff6474d2dd93df9b\": rpc error: code = NotFound desc = could not find container \"ce29458c1d4a64591c67f7a8baa8781a2d91d561a766e349ff6474d2dd93df9b\": container with ID starting with ce29458c1d4a64591c67f7a8baa8781a2d91d561a766e349ff6474d2dd93df9b not found: ID does not exist" Nov 28 13:35:46 crc kubenswrapper[4857]: I1128 13:35:46.505228 4857 scope.go:117] "RemoveContainer" containerID="74839d96da9ba141bc382691db8ef6254df987ecdc86b457d2dbd00e0d2224b0" Nov 28 13:35:46 crc kubenswrapper[4857]: E1128 13:35:46.505934 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74839d96da9ba141bc382691db8ef6254df987ecdc86b457d2dbd00e0d2224b0\": container with ID starting with 74839d96da9ba141bc382691db8ef6254df987ecdc86b457d2dbd00e0d2224b0 not found: ID does not exist" containerID="74839d96da9ba141bc382691db8ef6254df987ecdc86b457d2dbd00e0d2224b0" Nov 28 13:35:46 crc kubenswrapper[4857]: I1128 13:35:46.506005 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74839d96da9ba141bc382691db8ef6254df987ecdc86b457d2dbd00e0d2224b0"} err="failed to get container status \"74839d96da9ba141bc382691db8ef6254df987ecdc86b457d2dbd00e0d2224b0\": rpc error: code = NotFound desc = could not find container \"74839d96da9ba141bc382691db8ef6254df987ecdc86b457d2dbd00e0d2224b0\": container with ID starting with 74839d96da9ba141bc382691db8ef6254df987ecdc86b457d2dbd00e0d2224b0 not found: ID does not exist" Nov 28 13:35:46 crc kubenswrapper[4857]: I1128 13:35:46.506055 4857 scope.go:117] "RemoveContainer" 
containerID="f349bbc65b50908f85ca6d96697079d5ad2e62b1e2706bdaeb29ab4736747e8b" Nov 28 13:35:46 crc kubenswrapper[4857]: E1128 13:35:46.506456 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f349bbc65b50908f85ca6d96697079d5ad2e62b1e2706bdaeb29ab4736747e8b\": container with ID starting with f349bbc65b50908f85ca6d96697079d5ad2e62b1e2706bdaeb29ab4736747e8b not found: ID does not exist" containerID="f349bbc65b50908f85ca6d96697079d5ad2e62b1e2706bdaeb29ab4736747e8b" Nov 28 13:35:46 crc kubenswrapper[4857]: I1128 13:35:46.506532 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f349bbc65b50908f85ca6d96697079d5ad2e62b1e2706bdaeb29ab4736747e8b"} err="failed to get container status \"f349bbc65b50908f85ca6d96697079d5ad2e62b1e2706bdaeb29ab4736747e8b\": rpc error: code = NotFound desc = could not find container \"f349bbc65b50908f85ca6d96697079d5ad2e62b1e2706bdaeb29ab4736747e8b\": container with ID starting with f349bbc65b50908f85ca6d96697079d5ad2e62b1e2706bdaeb29ab4736747e8b not found: ID does not exist" Nov 28 13:35:48 crc kubenswrapper[4857]: I1128 13:35:48.320726 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="743c1056-0b38-4526-ae21-ef17f20596d2" path="/var/lib/kubelet/pods/743c1056-0b38-4526-ae21-ef17f20596d2/volumes" Nov 28 13:35:53 crc kubenswrapper[4857]: I1128 13:35:53.474159 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-cvgnl" event={"ID":"28d034d3-c255-40c7-b019-6b71be38c34f","Type":"ContainerStarted","Data":"21dcb4d54754410422eabf2ecb7c46b93245d7d7f9d11ab87464a0ae0bf41144"} Nov 28 13:35:53 crc kubenswrapper[4857]: I1128 13:35:53.500420 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-855d9ccff4-cvgnl" podStartSLOduration=-9223372004.354374 podStartE2EDuration="32.500400952s" podCreationTimestamp="2025-11-28 13:35:21 +0000 UTC" firstStartedPulling="2025-11-28 13:35:22.097167819 +0000 UTC m=+1014.124542986" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:35:53.499015683 +0000 UTC m=+1045.526390860" watchObservedRunningTime="2025-11-28 13:35:53.500400952 +0000 UTC m=+1045.527776129" Nov 28 13:35:57 crc kubenswrapper[4857]: I1128 13:35:57.513894 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-7cksq"] Nov 28 13:35:57 crc kubenswrapper[4857]: E1128 13:35:57.514570 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="743c1056-0b38-4526-ae21-ef17f20596d2" containerName="registry-server" Nov 28 13:35:57 crc kubenswrapper[4857]: I1128 13:35:57.514595 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="743c1056-0b38-4526-ae21-ef17f20596d2" containerName="registry-server" Nov 28 13:35:57 crc kubenswrapper[4857]: E1128 13:35:57.514620 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="743c1056-0b38-4526-ae21-ef17f20596d2" containerName="extract-content" Nov 28 13:35:57 crc kubenswrapper[4857]: I1128 13:35:57.514631 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="743c1056-0b38-4526-ae21-ef17f20596d2" containerName="extract-content" Nov 28 13:35:57 crc kubenswrapper[4857]: E1128 13:35:57.514655 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="743c1056-0b38-4526-ae21-ef17f20596d2" containerName="extract-utilities" Nov 28 13:35:57 crc kubenswrapper[4857]: 
I1128 13:35:57.514665 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="743c1056-0b38-4526-ae21-ef17f20596d2" containerName="extract-utilities" Nov 28 13:35:57 crc kubenswrapper[4857]: I1128 13:35:57.514866 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="743c1056-0b38-4526-ae21-ef17f20596d2" containerName="registry-server" Nov 28 13:35:57 crc kubenswrapper[4857]: I1128 13:35:57.515383 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-7cksq" Nov 28 13:35:57 crc kubenswrapper[4857]: I1128 13:35:57.532088 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-qrt7f" Nov 28 13:35:57 crc kubenswrapper[4857]: I1128 13:35:57.532290 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 28 13:35:57 crc kubenswrapper[4857]: I1128 13:35:57.534701 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 28 13:35:57 crc kubenswrapper[4857]: I1128 13:35:57.539067 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-7cksq"] Nov 28 13:35:57 crc kubenswrapper[4857]: I1128 13:35:57.634387 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lg4km\" (UniqueName: \"kubernetes.io/projected/97d1f33e-903d-4290-9993-74e2bcd6953e-kube-api-access-lg4km\") pod \"openstack-operator-index-7cksq\" (UID: \"97d1f33e-903d-4290-9993-74e2bcd6953e\") " pod="openstack-operators/openstack-operator-index-7cksq" Nov 28 13:35:57 crc kubenswrapper[4857]: I1128 13:35:57.736231 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lg4km\" (UniqueName: \"kubernetes.io/projected/97d1f33e-903d-4290-9993-74e2bcd6953e-kube-api-access-lg4km\") pod \"openstack-operator-index-7cksq\" (UID: \"97d1f33e-903d-4290-9993-74e2bcd6953e\") " pod="openstack-operators/openstack-operator-index-7cksq" Nov 28 13:35:57 crc kubenswrapper[4857]: I1128 13:35:57.758199 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lg4km\" (UniqueName: \"kubernetes.io/projected/97d1f33e-903d-4290-9993-74e2bcd6953e-kube-api-access-lg4km\") pod \"openstack-operator-index-7cksq\" (UID: \"97d1f33e-903d-4290-9993-74e2bcd6953e\") " pod="openstack-operators/openstack-operator-index-7cksq" Nov 28 13:35:57 crc kubenswrapper[4857]: I1128 13:35:57.847561 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-7cksq" Nov 28 13:35:58 crc kubenswrapper[4857]: I1128 13:35:58.023995 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-7cksq"] Nov 28 13:35:58 crc kubenswrapper[4857]: I1128 13:35:58.506135 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-7cksq" event={"ID":"97d1f33e-903d-4290-9993-74e2bcd6953e","Type":"ContainerStarted","Data":"aae77bdeb28ec157f13e3937dc5c3817dbfd84bdeca6ce3cf92ce00ef991c2b8"} Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.893263 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-7cksq"] Nov 28 13:36:01 crc kubenswrapper[4857]: I1128 13:36:01.496521 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-9b28s"] Nov 28 13:36:01 crc kubenswrapper[4857]: I1128 13:36:01.497231 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-9b28s" Nov 28 13:36:01 crc kubenswrapper[4857]: I1128 13:36:01.516316 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-9b28s"] Nov 28 13:36:01 crc kubenswrapper[4857]: I1128 13:36:01.611732 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjmm8\" (UniqueName: \"kubernetes.io/projected/2f9dbaf9-124d-4667-9744-afd253fd2c68-kube-api-access-cjmm8\") pod \"openstack-operator-index-9b28s\" (UID: \"2f9dbaf9-124d-4667-9744-afd253fd2c68\") " pod="openstack-operators/openstack-operator-index-9b28s" Nov 28 13:36:01 crc kubenswrapper[4857]: I1128 13:36:01.713703 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjmm8\" (UniqueName: \"kubernetes.io/projected/2f9dbaf9-124d-4667-9744-afd253fd2c68-kube-api-access-cjmm8\") pod \"openstack-operator-index-9b28s\" (UID: \"2f9dbaf9-124d-4667-9744-afd253fd2c68\") " pod="openstack-operators/openstack-operator-index-9b28s" Nov 28 13:36:01 crc kubenswrapper[4857]: I1128 13:36:01.738725 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjmm8\" (UniqueName: \"kubernetes.io/projected/2f9dbaf9-124d-4667-9744-afd253fd2c68-kube-api-access-cjmm8\") pod \"openstack-operator-index-9b28s\" (UID: \"2f9dbaf9-124d-4667-9744-afd253fd2c68\") " pod="openstack-operators/openstack-operator-index-9b28s" Nov 28 13:36:01 crc kubenswrapper[4857]: I1128 13:36:01.816497 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-9b28s" Nov 28 13:36:02 crc kubenswrapper[4857]: I1128 13:36:02.008055 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-9b28s"] Nov 28 13:36:02 crc kubenswrapper[4857]: W1128 13:36:02.010048 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2f9dbaf9_124d_4667_9744_afd253fd2c68.slice/crio-5075afb36e380800d5edc125bb21c25eb8913cae188992dcf9808c6331bc3c53 WatchSource:0}: Error finding container 5075afb36e380800d5edc125bb21c25eb8913cae188992dcf9808c6331bc3c53: Status 404 returned error can't find the container with id 5075afb36e380800d5edc125bb21c25eb8913cae188992dcf9808c6331bc3c53 Nov 28 13:36:02 crc kubenswrapper[4857]: I1128 13:36:02.533138 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-9b28s" event={"ID":"2f9dbaf9-124d-4667-9744-afd253fd2c68","Type":"ContainerStarted","Data":"5075afb36e380800d5edc125bb21c25eb8913cae188992dcf9808c6331bc3c53"} Nov 28 13:36:05 crc kubenswrapper[4857]: I1128 13:36:05.559561 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-7cksq" event={"ID":"97d1f33e-903d-4290-9993-74e2bcd6953e","Type":"ContainerStarted","Data":"705393b87d9bb8c7a08bd05f4ed12f891d362adb1a977a0acb4c4d9506166149"} Nov 28 13:36:05 crc kubenswrapper[4857]: I1128 13:36:05.559741 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-7cksq" podUID="97d1f33e-903d-4290-9993-74e2bcd6953e" containerName="registry-server" containerID="cri-o://705393b87d9bb8c7a08bd05f4ed12f891d362adb1a977a0acb4c4d9506166149" gracePeriod=2 Nov 28 13:36:05 crc kubenswrapper[4857]: I1128 13:36:05.564370 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-9b28s" event={"ID":"2f9dbaf9-124d-4667-9744-afd253fd2c68","Type":"ContainerStarted","Data":"50c1b2cc0b5d1bcaf247a91fce4c64000de02588eefa32c58722d4ece86fe90a"} Nov 28 13:36:05 crc kubenswrapper[4857]: I1128 13:36:05.576597 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-7cksq" podStartSLOduration=1.543672315 podStartE2EDuration="8.576576082s" podCreationTimestamp="2025-11-28 13:35:57 +0000 UTC" firstStartedPulling="2025-11-28 13:35:58.034195223 +0000 UTC m=+1050.061570390" lastFinishedPulling="2025-11-28 13:36:05.067099 +0000 UTC m=+1057.094474157" observedRunningTime="2025-11-28 13:36:05.574914155 +0000 UTC m=+1057.602289322" watchObservedRunningTime="2025-11-28 13:36:05.576576082 +0000 UTC m=+1057.603951259" Nov 28 13:36:05 crc kubenswrapper[4857]: I1128 13:36:05.592224 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-9b28s" podStartSLOduration=1.5359847599999998 podStartE2EDuration="4.592207979s" podCreationTimestamp="2025-11-28 13:36:01 +0000 UTC" firstStartedPulling="2025-11-28 13:36:02.0119102 +0000 UTC m=+1054.039285367" lastFinishedPulling="2025-11-28 13:36:05.068133419 +0000 UTC m=+1057.095508586" observedRunningTime="2025-11-28 13:36:05.587860737 +0000 UTC m=+1057.615235914" watchObservedRunningTime="2025-11-28 13:36:05.592207979 +0000 UTC m=+1057.619583146" Nov 28 13:36:05 crc kubenswrapper[4857]: I1128 13:36:05.935037 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-7cksq" Nov 28 13:36:06 crc kubenswrapper[4857]: I1128 13:36:06.069520 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lg4km\" (UniqueName: \"kubernetes.io/projected/97d1f33e-903d-4290-9993-74e2bcd6953e-kube-api-access-lg4km\") pod \"97d1f33e-903d-4290-9993-74e2bcd6953e\" (UID: \"97d1f33e-903d-4290-9993-74e2bcd6953e\") " Nov 28 13:36:06 crc kubenswrapper[4857]: I1128 13:36:06.076556 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97d1f33e-903d-4290-9993-74e2bcd6953e-kube-api-access-lg4km" (OuterVolumeSpecName: "kube-api-access-lg4km") pod "97d1f33e-903d-4290-9993-74e2bcd6953e" (UID: "97d1f33e-903d-4290-9993-74e2bcd6953e"). InnerVolumeSpecName "kube-api-access-lg4km". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:36:06 crc kubenswrapper[4857]: I1128 13:36:06.171578 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lg4km\" (UniqueName: \"kubernetes.io/projected/97d1f33e-903d-4290-9993-74e2bcd6953e-kube-api-access-lg4km\") on node \"crc\" DevicePath \"\"" Nov 28 13:36:06 crc kubenswrapper[4857]: I1128 13:36:06.576675 4857 generic.go:334] "Generic (PLEG): container finished" podID="97d1f33e-903d-4290-9993-74e2bcd6953e" containerID="705393b87d9bb8c7a08bd05f4ed12f891d362adb1a977a0acb4c4d9506166149" exitCode=0 Nov 28 13:36:06 crc kubenswrapper[4857]: I1128 13:36:06.576798 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-7cksq" event={"ID":"97d1f33e-903d-4290-9993-74e2bcd6953e","Type":"ContainerDied","Data":"705393b87d9bb8c7a08bd05f4ed12f891d362adb1a977a0acb4c4d9506166149"} Nov 28 13:36:06 crc kubenswrapper[4857]: I1128 13:36:06.576841 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-7cksq" Nov 28 13:36:06 crc kubenswrapper[4857]: I1128 13:36:06.576885 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-7cksq" event={"ID":"97d1f33e-903d-4290-9993-74e2bcd6953e","Type":"ContainerDied","Data":"aae77bdeb28ec157f13e3937dc5c3817dbfd84bdeca6ce3cf92ce00ef991c2b8"} Nov 28 13:36:06 crc kubenswrapper[4857]: I1128 13:36:06.576904 4857 scope.go:117] "RemoveContainer" containerID="705393b87d9bb8c7a08bd05f4ed12f891d362adb1a977a0acb4c4d9506166149" Nov 28 13:36:06 crc kubenswrapper[4857]: I1128 13:36:06.611323 4857 scope.go:117] "RemoveContainer" containerID="705393b87d9bb8c7a08bd05f4ed12f891d362adb1a977a0acb4c4d9506166149" Nov 28 13:36:06 crc kubenswrapper[4857]: E1128 13:36:06.612011 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"705393b87d9bb8c7a08bd05f4ed12f891d362adb1a977a0acb4c4d9506166149\": container with ID starting with 705393b87d9bb8c7a08bd05f4ed12f891d362adb1a977a0acb4c4d9506166149 not found: ID does not exist" containerID="705393b87d9bb8c7a08bd05f4ed12f891d362adb1a977a0acb4c4d9506166149" Nov 28 13:36:06 crc kubenswrapper[4857]: I1128 13:36:06.612124 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"705393b87d9bb8c7a08bd05f4ed12f891d362adb1a977a0acb4c4d9506166149"} err="failed to get container status \"705393b87d9bb8c7a08bd05f4ed12f891d362adb1a977a0acb4c4d9506166149\": rpc error: code = NotFound desc = could not find container \"705393b87d9bb8c7a08bd05f4ed12f891d362adb1a977a0acb4c4d9506166149\": container with ID starting with 705393b87d9bb8c7a08bd05f4ed12f891d362adb1a977a0acb4c4d9506166149 not found: ID does not exist" Nov 28 13:36:06 crc kubenswrapper[4857]: I1128 13:36:06.612682 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-7cksq"] Nov 28 13:36:06 crc kubenswrapper[4857]: I1128 13:36:06.623702 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-7cksq"] Nov 28 13:36:08 crc kubenswrapper[4857]: I1128 13:36:08.322820 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97d1f33e-903d-4290-9993-74e2bcd6953e" path="/var/lib/kubelet/pods/97d1f33e-903d-4290-9993-74e2bcd6953e/volumes" Nov 28 13:36:11 crc kubenswrapper[4857]: I1128 13:36:11.816673 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-9b28s" Nov 28 13:36:11 crc kubenswrapper[4857]: I1128 13:36:11.816795 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-9b28s" Nov 28 13:36:11 crc kubenswrapper[4857]: I1128 13:36:11.851855 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-9b28s" Nov 28 13:36:12 crc kubenswrapper[4857]: I1128 13:36:12.656619 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-9b28s" Nov 28 13:36:19 crc kubenswrapper[4857]: I1128 13:36:19.151650 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw"] Nov 28 13:36:19 crc kubenswrapper[4857]: E1128 13:36:19.152456 4857 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="97d1f33e-903d-4290-9993-74e2bcd6953e" containerName="registry-server" Nov 28 13:36:19 crc kubenswrapper[4857]: I1128 13:36:19.152474 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="97d1f33e-903d-4290-9993-74e2bcd6953e" containerName="registry-server" Nov 28 13:36:19 crc kubenswrapper[4857]: I1128 13:36:19.152682 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="97d1f33e-903d-4290-9993-74e2bcd6953e" containerName="registry-server" Nov 28 13:36:19 crc kubenswrapper[4857]: I1128 13:36:19.153991 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw" Nov 28 13:36:19 crc kubenswrapper[4857]: I1128 13:36:19.159226 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-kjmq8" Nov 28 13:36:19 crc kubenswrapper[4857]: I1128 13:36:19.162162 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw"] Nov 28 13:36:19 crc kubenswrapper[4857]: I1128 13:36:19.257590 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9af5d950-6280-44db-85a8-91e7172b9d51-bundle\") pod \"5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw\" (UID: \"9af5d950-6280-44db-85a8-91e7172b9d51\") " pod="openstack-operators/5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw" Nov 28 13:36:19 crc kubenswrapper[4857]: I1128 13:36:19.258015 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t45wq\" (UniqueName: \"kubernetes.io/projected/9af5d950-6280-44db-85a8-91e7172b9d51-kube-api-access-t45wq\") pod \"5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw\" (UID: \"9af5d950-6280-44db-85a8-91e7172b9d51\") " pod="openstack-operators/5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw" Nov 28 13:36:19 crc kubenswrapper[4857]: I1128 13:36:19.258242 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9af5d950-6280-44db-85a8-91e7172b9d51-util\") pod \"5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw\" (UID: \"9af5d950-6280-44db-85a8-91e7172b9d51\") " pod="openstack-operators/5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw" Nov 28 13:36:19 crc kubenswrapper[4857]: I1128 13:36:19.360424 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9af5d950-6280-44db-85a8-91e7172b9d51-bundle\") pod \"5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw\" (UID: \"9af5d950-6280-44db-85a8-91e7172b9d51\") " pod="openstack-operators/5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw" Nov 28 13:36:19 crc kubenswrapper[4857]: I1128 13:36:19.360482 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t45wq\" (UniqueName: \"kubernetes.io/projected/9af5d950-6280-44db-85a8-91e7172b9d51-kube-api-access-t45wq\") pod \"5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw\" (UID: \"9af5d950-6280-44db-85a8-91e7172b9d51\") " pod="openstack-operators/5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw" Nov 28 13:36:19 crc kubenswrapper[4857]: I1128 13:36:19.360537 4857 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9af5d950-6280-44db-85a8-91e7172b9d51-util\") pod \"5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw\" (UID: \"9af5d950-6280-44db-85a8-91e7172b9d51\") " pod="openstack-operators/5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw" Nov 28 13:36:19 crc kubenswrapper[4857]: I1128 13:36:19.361244 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9af5d950-6280-44db-85a8-91e7172b9d51-util\") pod \"5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw\" (UID: \"9af5d950-6280-44db-85a8-91e7172b9d51\") " pod="openstack-operators/5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw" Nov 28 13:36:19 crc kubenswrapper[4857]: I1128 13:36:19.361943 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9af5d950-6280-44db-85a8-91e7172b9d51-bundle\") pod \"5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw\" (UID: \"9af5d950-6280-44db-85a8-91e7172b9d51\") " pod="openstack-operators/5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw" Nov 28 13:36:19 crc kubenswrapper[4857]: I1128 13:36:19.390989 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t45wq\" (UniqueName: \"kubernetes.io/projected/9af5d950-6280-44db-85a8-91e7172b9d51-kube-api-access-t45wq\") pod \"5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw\" (UID: \"9af5d950-6280-44db-85a8-91e7172b9d51\") " pod="openstack-operators/5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw" Nov 28 13:36:19 crc kubenswrapper[4857]: I1128 13:36:19.483732 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw" Nov 28 13:36:20 crc kubenswrapper[4857]: I1128 13:36:20.088997 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw"] Nov 28 13:36:20 crc kubenswrapper[4857]: I1128 13:36:20.699833 4857 generic.go:334] "Generic (PLEG): container finished" podID="9af5d950-6280-44db-85a8-91e7172b9d51" containerID="5c124f5308b87cc0ed661f3c6edb6943e99f76937a96dc9e233c9e9e5750470e" exitCode=0 Nov 28 13:36:20 crc kubenswrapper[4857]: I1128 13:36:20.699902 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw" event={"ID":"9af5d950-6280-44db-85a8-91e7172b9d51","Type":"ContainerDied","Data":"5c124f5308b87cc0ed661f3c6edb6943e99f76937a96dc9e233c9e9e5750470e"} Nov 28 13:36:20 crc kubenswrapper[4857]: I1128 13:36:20.700094 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw" event={"ID":"9af5d950-6280-44db-85a8-91e7172b9d51","Type":"ContainerStarted","Data":"8cc0070431b5c66b94ecf3808bd7160b9299b12c9139048e955586f6e6be5b04"} Nov 28 13:36:21 crc kubenswrapper[4857]: I1128 13:36:21.727109 4857 generic.go:334] "Generic (PLEG): container finished" podID="9af5d950-6280-44db-85a8-91e7172b9d51" containerID="63db8ca5b10154e43149dcf94aafca84d848bc0e912877e2ba8387fba1ba44db" exitCode=0 Nov 28 13:36:21 crc kubenswrapper[4857]: I1128 13:36:21.727622 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw" event={"ID":"9af5d950-6280-44db-85a8-91e7172b9d51","Type":"ContainerDied","Data":"63db8ca5b10154e43149dcf94aafca84d848bc0e912877e2ba8387fba1ba44db"} Nov 28 13:36:22 crc kubenswrapper[4857]: I1128 13:36:22.734170 4857 generic.go:334] "Generic (PLEG): container finished" podID="9af5d950-6280-44db-85a8-91e7172b9d51" containerID="fd5b2144efd333a21db859f2c1cd0fac95d62ef5d76ea16aa2f9053332c7a460" exitCode=0 Nov 28 13:36:22 crc kubenswrapper[4857]: I1128 13:36:22.735704 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw" event={"ID":"9af5d950-6280-44db-85a8-91e7172b9d51","Type":"ContainerDied","Data":"fd5b2144efd333a21db859f2c1cd0fac95d62ef5d76ea16aa2f9053332c7a460"} Nov 28 13:36:24 crc kubenswrapper[4857]: I1128 13:36:24.069163 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw" Nov 28 13:36:24 crc kubenswrapper[4857]: I1128 13:36:24.126515 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9af5d950-6280-44db-85a8-91e7172b9d51-util\") pod \"9af5d950-6280-44db-85a8-91e7172b9d51\" (UID: \"9af5d950-6280-44db-85a8-91e7172b9d51\") " Nov 28 13:36:24 crc kubenswrapper[4857]: I1128 13:36:24.126578 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9af5d950-6280-44db-85a8-91e7172b9d51-bundle\") pod \"9af5d950-6280-44db-85a8-91e7172b9d51\" (UID: \"9af5d950-6280-44db-85a8-91e7172b9d51\") " Nov 28 13:36:24 crc kubenswrapper[4857]: I1128 13:36:24.126635 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t45wq\" (UniqueName: \"kubernetes.io/projected/9af5d950-6280-44db-85a8-91e7172b9d51-kube-api-access-t45wq\") pod \"9af5d950-6280-44db-85a8-91e7172b9d51\" (UID: \"9af5d950-6280-44db-85a8-91e7172b9d51\") " Nov 28 13:36:24 crc kubenswrapper[4857]: I1128 13:36:24.128240 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9af5d950-6280-44db-85a8-91e7172b9d51-bundle" (OuterVolumeSpecName: "bundle") pod "9af5d950-6280-44db-85a8-91e7172b9d51" (UID: "9af5d950-6280-44db-85a8-91e7172b9d51"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:36:24 crc kubenswrapper[4857]: I1128 13:36:24.133275 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9af5d950-6280-44db-85a8-91e7172b9d51-kube-api-access-t45wq" (OuterVolumeSpecName: "kube-api-access-t45wq") pod "9af5d950-6280-44db-85a8-91e7172b9d51" (UID: "9af5d950-6280-44db-85a8-91e7172b9d51"). InnerVolumeSpecName "kube-api-access-t45wq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:36:24 crc kubenswrapper[4857]: I1128 13:36:24.145514 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9af5d950-6280-44db-85a8-91e7172b9d51-util" (OuterVolumeSpecName: "util") pod "9af5d950-6280-44db-85a8-91e7172b9d51" (UID: "9af5d950-6280-44db-85a8-91e7172b9d51"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:36:24 crc kubenswrapper[4857]: I1128 13:36:24.228285 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t45wq\" (UniqueName: \"kubernetes.io/projected/9af5d950-6280-44db-85a8-91e7172b9d51-kube-api-access-t45wq\") on node \"crc\" DevicePath \"\"" Nov 28 13:36:24 crc kubenswrapper[4857]: I1128 13:36:24.228331 4857 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9af5d950-6280-44db-85a8-91e7172b9d51-util\") on node \"crc\" DevicePath \"\"" Nov 28 13:36:24 crc kubenswrapper[4857]: I1128 13:36:24.228346 4857 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9af5d950-6280-44db-85a8-91e7172b9d51-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:36:24 crc kubenswrapper[4857]: I1128 13:36:24.754369 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw" event={"ID":"9af5d950-6280-44db-85a8-91e7172b9d51","Type":"ContainerDied","Data":"8cc0070431b5c66b94ecf3808bd7160b9299b12c9139048e955586f6e6be5b04"} Nov 28 13:36:24 crc kubenswrapper[4857]: I1128 13:36:24.754437 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8cc0070431b5c66b94ecf3808bd7160b9299b12c9139048e955586f6e6be5b04" Nov 28 13:36:24 crc kubenswrapper[4857]: I1128 13:36:24.754441 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw" Nov 28 13:36:32 crc kubenswrapper[4857]: I1128 13:36:32.085673 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-ffb7b6fb5-rn794"] Nov 28 13:36:32 crc kubenswrapper[4857]: E1128 13:36:32.086484 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9af5d950-6280-44db-85a8-91e7172b9d51" containerName="extract" Nov 28 13:36:32 crc kubenswrapper[4857]: I1128 13:36:32.086501 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9af5d950-6280-44db-85a8-91e7172b9d51" containerName="extract" Nov 28 13:36:32 crc kubenswrapper[4857]: E1128 13:36:32.086520 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9af5d950-6280-44db-85a8-91e7172b9d51" containerName="util" Nov 28 13:36:32 crc kubenswrapper[4857]: I1128 13:36:32.086528 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9af5d950-6280-44db-85a8-91e7172b9d51" containerName="util" Nov 28 13:36:32 crc kubenswrapper[4857]: E1128 13:36:32.086553 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9af5d950-6280-44db-85a8-91e7172b9d51" containerName="pull" Nov 28 13:36:32 crc kubenswrapper[4857]: I1128 13:36:32.086559 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9af5d950-6280-44db-85a8-91e7172b9d51" containerName="pull" Nov 28 13:36:32 crc kubenswrapper[4857]: I1128 13:36:32.086685 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="9af5d950-6280-44db-85a8-91e7172b9d51" containerName="extract" Nov 28 13:36:32 crc kubenswrapper[4857]: I1128 13:36:32.087116 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-ffb7b6fb5-rn794" Nov 28 13:36:32 crc kubenswrapper[4857]: I1128 13:36:32.092210 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-lc6s6" Nov 28 13:36:32 crc kubenswrapper[4857]: I1128 13:36:32.113335 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-ffb7b6fb5-rn794"] Nov 28 13:36:32 crc kubenswrapper[4857]: I1128 13:36:32.177606 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scqxq\" (UniqueName: \"kubernetes.io/projected/e8cd6acf-ae30-4fe6-bb1e-7075351b6306-kube-api-access-scqxq\") pod \"openstack-operator-controller-operator-ffb7b6fb5-rn794\" (UID: \"e8cd6acf-ae30-4fe6-bb1e-7075351b6306\") " pod="openstack-operators/openstack-operator-controller-operator-ffb7b6fb5-rn794" Nov 28 13:36:32 crc kubenswrapper[4857]: I1128 13:36:32.278552 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-scqxq\" (UniqueName: \"kubernetes.io/projected/e8cd6acf-ae30-4fe6-bb1e-7075351b6306-kube-api-access-scqxq\") pod \"openstack-operator-controller-operator-ffb7b6fb5-rn794\" (UID: \"e8cd6acf-ae30-4fe6-bb1e-7075351b6306\") " pod="openstack-operators/openstack-operator-controller-operator-ffb7b6fb5-rn794" Nov 28 13:36:32 crc kubenswrapper[4857]: I1128 13:36:32.306338 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-scqxq\" (UniqueName: \"kubernetes.io/projected/e8cd6acf-ae30-4fe6-bb1e-7075351b6306-kube-api-access-scqxq\") pod \"openstack-operator-controller-operator-ffb7b6fb5-rn794\" (UID: \"e8cd6acf-ae30-4fe6-bb1e-7075351b6306\") " pod="openstack-operators/openstack-operator-controller-operator-ffb7b6fb5-rn794" Nov 28 13:36:32 crc kubenswrapper[4857]: I1128 13:36:32.406257 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-ffb7b6fb5-rn794" Nov 28 13:36:32 crc kubenswrapper[4857]: I1128 13:36:32.832237 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-ffb7b6fb5-rn794"] Nov 28 13:36:32 crc kubenswrapper[4857]: W1128 13:36:32.836820 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode8cd6acf_ae30_4fe6_bb1e_7075351b6306.slice/crio-f5c280494606df7c5b33f646d813909b5561960d79a6a977f54852be27e50139 WatchSource:0}: Error finding container f5c280494606df7c5b33f646d813909b5561960d79a6a977f54852be27e50139: Status 404 returned error can't find the container with id f5c280494606df7c5b33f646d813909b5561960d79a6a977f54852be27e50139 Nov 28 13:36:33 crc kubenswrapper[4857]: I1128 13:36:33.813882 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-ffb7b6fb5-rn794" event={"ID":"e8cd6acf-ae30-4fe6-bb1e-7075351b6306","Type":"ContainerStarted","Data":"f5c280494606df7c5b33f646d813909b5561960d79a6a977f54852be27e50139"} Nov 28 13:36:37 crc kubenswrapper[4857]: I1128 13:36:37.846897 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-ffb7b6fb5-rn794" event={"ID":"e8cd6acf-ae30-4fe6-bb1e-7075351b6306","Type":"ContainerStarted","Data":"51dae9834d4de1482570839322fc1328412519c602d8a34aa39eb5dd75ec769c"} Nov 28 13:36:38 crc kubenswrapper[4857]: I1128 13:36:38.853669 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-ffb7b6fb5-rn794" Nov 28 13:36:38 crc kubenswrapper[4857]: I1128 13:36:38.886518 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-ffb7b6fb5-rn794" podStartSLOduration=2.459487641 podStartE2EDuration="6.886492035s" podCreationTimestamp="2025-11-28 13:36:32 +0000 UTC" firstStartedPulling="2025-11-28 13:36:32.839162526 +0000 UTC m=+1084.866537693" lastFinishedPulling="2025-11-28 13:36:37.26616692 +0000 UTC m=+1089.293542087" observedRunningTime="2025-11-28 13:36:38.880391705 +0000 UTC m=+1090.907766872" watchObservedRunningTime="2025-11-28 13:36:38.886492035 +0000 UTC m=+1090.913867212" Nov 28 13:36:42 crc kubenswrapper[4857]: I1128 13:36:42.410274 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-ffb7b6fb5-rn794" Nov 28 13:37:03 crc kubenswrapper[4857]: I1128 13:37:03.177978 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:37:03 crc kubenswrapper[4857]: I1128 13:37:03.179720 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.618508 4857 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-msp7g"] Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.620269 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-msp7g" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.623068 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-r9w9k" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.635292 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-fp6fc"] Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.636483 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-fp6fc" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.639185 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-pc5wb" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.648883 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-fr6n9"] Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.649992 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-fr6n9" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.653155 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-hkg78" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.653878 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-fr6n9"] Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.658900 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-msp7g"] Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.666808 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-cwbkd"] Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.668139 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-cwbkd" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.675573 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-wkfb4"] Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.677009 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-wkfb4" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.701675 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-z7wfx" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.702408 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-62bbd" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.708392 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-fp6fc"] Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.803884 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvk8n\" (UniqueName: \"kubernetes.io/projected/41a3b8e7-e61b-45fc-a87e-99e2d943fd15-kube-api-access-vvk8n\") pod \"cinder-operator-controller-manager-6b7f75547b-fp6fc\" (UID: \"41a3b8e7-e61b-45fc-a87e-99e2d943fd15\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-fp6fc" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.803931 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vnqrq\" (UniqueName: \"kubernetes.io/projected/b35bb4aa-b164-47fe-85bd-8f34b7e55e5e-kube-api-access-vnqrq\") pod \"glance-operator-controller-manager-589cbd6b5b-cwbkd\" (UID: \"b35bb4aa-b164-47fe-85bd-8f34b7e55e5e\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-cwbkd" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.803975 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t68xd\" (UniqueName: \"kubernetes.io/projected/8ba8130c-f7e1-4cc0-8427-5a13997138ce-kube-api-access-t68xd\") pod \"barbican-operator-controller-manager-7b64f4fb85-msp7g\" (UID: \"8ba8130c-f7e1-4cc0-8427-5a13997138ce\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-msp7g" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.804026 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4j6t\" (UniqueName: \"kubernetes.io/projected/e4010942-0dec-4e3e-8f52-f69abf7ace10-kube-api-access-r4j6t\") pod \"heat-operator-controller-manager-5b77f656f-wkfb4\" (UID: \"e4010942-0dec-4e3e-8f52-f69abf7ace10\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-wkfb4" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.804527 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-74xdb\" (UniqueName: \"kubernetes.io/projected/42e0b127-fc00-42c7-a7d8-9e5ea55a6590-kube-api-access-74xdb\") pod \"designate-operator-controller-manager-955677c94-fr6n9\" (UID: \"42e0b127-fc00-42c7-a7d8-9e5ea55a6590\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-fr6n9" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.804590 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-wkfb4"] Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.820787 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-cwbkd"] Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.825080 
4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-qb9wz"] Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.826179 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-qb9wz" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.831674 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.831836 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-khqvk" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.861931 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-4gn9q"] Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.862946 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-4gn9q" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.872865 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-n7q8r" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.888818 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-hg5f2"] Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.889917 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-hg5f2" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.894231 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-5967q" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.906532 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-74xdb\" (UniqueName: \"kubernetes.io/projected/42e0b127-fc00-42c7-a7d8-9e5ea55a6590-kube-api-access-74xdb\") pod \"designate-operator-controller-manager-955677c94-fr6n9\" (UID: \"42e0b127-fc00-42c7-a7d8-9e5ea55a6590\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-fr6n9" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.906583 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvk8n\" (UniqueName: \"kubernetes.io/projected/41a3b8e7-e61b-45fc-a87e-99e2d943fd15-kube-api-access-vvk8n\") pod \"cinder-operator-controller-manager-6b7f75547b-fp6fc\" (UID: \"41a3b8e7-e61b-45fc-a87e-99e2d943fd15\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-fp6fc" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.906606 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vnqrq\" (UniqueName: \"kubernetes.io/projected/b35bb4aa-b164-47fe-85bd-8f34b7e55e5e-kube-api-access-vnqrq\") pod \"glance-operator-controller-manager-589cbd6b5b-cwbkd\" (UID: \"b35bb4aa-b164-47fe-85bd-8f34b7e55e5e\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-cwbkd" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.906636 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t68xd\" (UniqueName: 
\"kubernetes.io/projected/8ba8130c-f7e1-4cc0-8427-5a13997138ce-kube-api-access-t68xd\") pod \"barbican-operator-controller-manager-7b64f4fb85-msp7g\" (UID: \"8ba8130c-f7e1-4cc0-8427-5a13997138ce\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-msp7g" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.906689 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4j6t\" (UniqueName: \"kubernetes.io/projected/e4010942-0dec-4e3e-8f52-f69abf7ace10-kube-api-access-r4j6t\") pod \"heat-operator-controller-manager-5b77f656f-wkfb4\" (UID: \"e4010942-0dec-4e3e-8f52-f69abf7ace10\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-wkfb4" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.954783 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-qb9wz"] Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.962637 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4j6t\" (UniqueName: \"kubernetes.io/projected/e4010942-0dec-4e3e-8f52-f69abf7ace10-kube-api-access-r4j6t\") pod \"heat-operator-controller-manager-5b77f656f-wkfb4\" (UID: \"e4010942-0dec-4e3e-8f52-f69abf7ace10\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-wkfb4" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.964446 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvk8n\" (UniqueName: \"kubernetes.io/projected/41a3b8e7-e61b-45fc-a87e-99e2d943fd15-kube-api-access-vvk8n\") pod \"cinder-operator-controller-manager-6b7f75547b-fp6fc\" (UID: \"41a3b8e7-e61b-45fc-a87e-99e2d943fd15\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-fp6fc" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.968620 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-fp6fc" Nov 28 13:37:18 crc kubenswrapper[4857]: I1128 13:37:18.981490 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t68xd\" (UniqueName: \"kubernetes.io/projected/8ba8130c-f7e1-4cc0-8427-5a13997138ce-kube-api-access-t68xd\") pod \"barbican-operator-controller-manager-7b64f4fb85-msp7g\" (UID: \"8ba8130c-f7e1-4cc0-8427-5a13997138ce\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-msp7g" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.010352 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-4gn9q"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.010977 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8s6s\" (UniqueName: \"kubernetes.io/projected/d437c518-be55-44c0-b374-5c3d2d62b49a-kube-api-access-v8s6s\") pod \"infra-operator-controller-manager-57548d458d-qb9wz\" (UID: \"d437c518-be55-44c0-b374-5c3d2d62b49a\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-qb9wz" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.011012 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f798c\" (UniqueName: \"kubernetes.io/projected/cb4c2469-9178-463d-a9be-700af973c9b8-kube-api-access-f798c\") pod \"ironic-operator-controller-manager-67cb4dc6d4-hg5f2\" (UID: \"cb4c2469-9178-463d-a9be-700af973c9b8\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-hg5f2" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.011038 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9g6s\" (UniqueName: \"kubernetes.io/projected/3c9f0811-92a3-4681-b71e-28d474c3751e-kube-api-access-m9g6s\") pod \"horizon-operator-controller-manager-5d494799bf-4gn9q\" (UID: \"3c9f0811-92a3-4681-b71e-28d474c3751e\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-4gn9q" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.011070 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d437c518-be55-44c0-b374-5c3d2d62b49a-cert\") pod \"infra-operator-controller-manager-57548d458d-qb9wz\" (UID: \"d437c518-be55-44c0-b374-5c3d2d62b49a\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-qb9wz" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.012176 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-74xdb\" (UniqueName: \"kubernetes.io/projected/42e0b127-fc00-42c7-a7d8-9e5ea55a6590-kube-api-access-74xdb\") pod \"designate-operator-controller-manager-955677c94-fr6n9\" (UID: \"42e0b127-fc00-42c7-a7d8-9e5ea55a6590\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-fr6n9" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.026318 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vnqrq\" (UniqueName: \"kubernetes.io/projected/b35bb4aa-b164-47fe-85bd-8f34b7e55e5e-kube-api-access-vnqrq\") pod \"glance-operator-controller-manager-589cbd6b5b-cwbkd\" (UID: \"b35bb4aa-b164-47fe-85bd-8f34b7e55e5e\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-cwbkd" Nov 28 13:37:19 crc 
kubenswrapper[4857]: I1128 13:37:19.040323 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-wkfb4" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.056933 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-hg5f2"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.097076 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-x4dfd"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.098473 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-x4dfd" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.099210 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-r9w5w"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.100942 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-r9w5w" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.107287 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-4wtvs"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.108559 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-4wtvs" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.127788 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-x4dfd"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.127837 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-r9w5w"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.129029 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8s6s\" (UniqueName: \"kubernetes.io/projected/d437c518-be55-44c0-b374-5c3d2d62b49a-kube-api-access-v8s6s\") pod \"infra-operator-controller-manager-57548d458d-qb9wz\" (UID: \"d437c518-be55-44c0-b374-5c3d2d62b49a\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-qb9wz" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.129066 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxqgd\" (UniqueName: \"kubernetes.io/projected/49f7d7ee-64a4-4ce9-91ec-a76be0cdd249-kube-api-access-bxqgd\") pod \"keystone-operator-controller-manager-7b4567c7cf-r9w5w\" (UID: \"49f7d7ee-64a4-4ce9-91ec-a76be0cdd249\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-r9w5w" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.129107 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f798c\" (UniqueName: \"kubernetes.io/projected/cb4c2469-9178-463d-a9be-700af973c9b8-kube-api-access-f798c\") pod \"ironic-operator-controller-manager-67cb4dc6d4-hg5f2\" (UID: \"cb4c2469-9178-463d-a9be-700af973c9b8\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-hg5f2" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.129143 4857 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-m9g6s\" (UniqueName: \"kubernetes.io/projected/3c9f0811-92a3-4681-b71e-28d474c3751e-kube-api-access-m9g6s\") pod \"horizon-operator-controller-manager-5d494799bf-4gn9q\" (UID: \"3c9f0811-92a3-4681-b71e-28d474c3751e\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-4gn9q" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.129184 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bzlj\" (UniqueName: \"kubernetes.io/projected/502d3010-ae32-4e6f-a7bf-614cd1da9dda-kube-api-access-8bzlj\") pod \"manila-operator-controller-manager-5d499bf58b-4wtvs\" (UID: \"502d3010-ae32-4e6f-a7bf-614cd1da9dda\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-4wtvs" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.129214 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d437c518-be55-44c0-b374-5c3d2d62b49a-cert\") pod \"infra-operator-controller-manager-57548d458d-qb9wz\" (UID: \"d437c518-be55-44c0-b374-5c3d2d62b49a\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-qb9wz" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.129253 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9d2q\" (UniqueName: \"kubernetes.io/projected/968d6179-1a75-405c-97cc-cad775d59e28-kube-api-access-p9d2q\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-x4dfd\" (UID: \"968d6179-1a75-405c-97cc-cad775d59e28\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-x4dfd" Nov 28 13:37:19 crc kubenswrapper[4857]: E1128 13:37:19.129903 4857 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 13:37:19 crc kubenswrapper[4857]: E1128 13:37:19.129950 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d437c518-be55-44c0-b374-5c3d2d62b49a-cert podName:d437c518-be55-44c0-b374-5c3d2d62b49a nodeName:}" failed. No retries permitted until 2025-11-28 13:37:19.629931447 +0000 UTC m=+1131.657306614 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d437c518-be55-44c0-b374-5c3d2d62b49a-cert") pod "infra-operator-controller-manager-57548d458d-qb9wz" (UID: "d437c518-be55-44c0-b374-5c3d2d62b49a") : secret "infra-operator-webhook-server-cert" not found Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.139047 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-pwqtc" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.139499 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-v2qq8" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.139696 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-vd6xv" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.153663 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-4wtvs"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.170382 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9g6s\" (UniqueName: \"kubernetes.io/projected/3c9f0811-92a3-4681-b71e-28d474c3751e-kube-api-access-m9g6s\") pod \"horizon-operator-controller-manager-5d494799bf-4gn9q\" (UID: \"3c9f0811-92a3-4681-b71e-28d474c3751e\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-4gn9q" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.170445 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-c9dqn"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.171437 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-c9dqn" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.179952 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-bftt7" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.185439 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f798c\" (UniqueName: \"kubernetes.io/projected/cb4c2469-9178-463d-a9be-700af973c9b8-kube-api-access-f798c\") pod \"ironic-operator-controller-manager-67cb4dc6d4-hg5f2\" (UID: \"cb4c2469-9178-463d-a9be-700af973c9b8\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-hg5f2" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.185553 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-c9dqn"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.195478 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8s6s\" (UniqueName: \"kubernetes.io/projected/d437c518-be55-44c0-b374-5c3d2d62b49a-kube-api-access-v8s6s\") pod \"infra-operator-controller-manager-57548d458d-qb9wz\" (UID: \"d437c518-be55-44c0-b374-5c3d2d62b49a\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-qb9wz" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.195762 4857 util.go:30] "No sandbox for pod can be found. 
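The pair of E1128 entries a few lines above explains why infra-operator cannot finish volume setup: its "cert" volume references a Secret that does not exist yet, so MountVolume.SetUp fails and a retry is scheduled (durationBeforeRetry 500ms). In the pod spec that volume has roughly the following shape (JSON form written as a Python literal; the volume and secret names are taken from the log, the surrounding structure is the standard secret-volume shape, not copied from this cluster):

    # Sketch of the failing volume as it would appear in the pod spec.
    cert_volume = {
        "name": "cert",
        "secret": {"secretName": "infra-operator-webhook-server-cert"},
    }
    # Until that Secret is created, every MountVolume.SetUp attempt fails with
    # 'secret "infra-operator-webhook-server-cert" not found' and is retried.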
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-4gn9q" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.223545 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-hg5f2" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.230250 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxqgd\" (UniqueName: \"kubernetes.io/projected/49f7d7ee-64a4-4ce9-91ec-a76be0cdd249-kube-api-access-bxqgd\") pod \"keystone-operator-controller-manager-7b4567c7cf-r9w5w\" (UID: \"49f7d7ee-64a4-4ce9-91ec-a76be0cdd249\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-r9w5w" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.230328 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bzlj\" (UniqueName: \"kubernetes.io/projected/502d3010-ae32-4e6f-a7bf-614cd1da9dda-kube-api-access-8bzlj\") pod \"manila-operator-controller-manager-5d499bf58b-4wtvs\" (UID: \"502d3010-ae32-4e6f-a7bf-614cd1da9dda\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-4wtvs" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.230358 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9d2q\" (UniqueName: \"kubernetes.io/projected/968d6179-1a75-405c-97cc-cad775d59e28-kube-api-access-p9d2q\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-x4dfd\" (UID: \"968d6179-1a75-405c-97cc-cad775d59e28\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-x4dfd" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.253824 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-nv4bx"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.254882 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-nv4bx" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.266434 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-msp7g" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.306474 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-6t48w" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.310648 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-fr6n9" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.319482 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-cwbkd" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.334031 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5d5br\" (UniqueName: \"kubernetes.io/projected/4f45a249-50b4-466c-a54b-9205e5a127e7-kube-api-access-5d5br\") pod \"nova-operator-controller-manager-79556f57fc-nv4bx\" (UID: \"4f45a249-50b4-466c-a54b-9205e5a127e7\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-nv4bx" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.334360 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrdtt\" (UniqueName: \"kubernetes.io/projected/a4e0aa4d-510c-4880-84fd-998e7527e41d-kube-api-access-rrdtt\") pod \"neutron-operator-controller-manager-6fdcddb789-c9dqn\" (UID: \"a4e0aa4d-510c-4880-84fd-998e7527e41d\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-c9dqn" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.346847 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-nv4bx"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.361137 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-zzqvn"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.362369 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-zzqvn" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.366507 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bzlj\" (UniqueName: \"kubernetes.io/projected/502d3010-ae32-4e6f-a7bf-614cd1da9dda-kube-api-access-8bzlj\") pod \"manila-operator-controller-manager-5d499bf58b-4wtvs\" (UID: \"502d3010-ae32-4e6f-a7bf-614cd1da9dda\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-4wtvs" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.373873 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9d2q\" (UniqueName: \"kubernetes.io/projected/968d6179-1a75-405c-97cc-cad775d59e28-kube-api-access-p9d2q\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-x4dfd\" (UID: \"968d6179-1a75-405c-97cc-cad775d59e28\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-x4dfd" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.379682 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-rw5j8" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.403523 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-zzqvn"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.411455 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-7lsxw"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.415919 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-7lsxw" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.422626 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-678fz" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.432865 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-7lsxw"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.510772 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxqgd\" (UniqueName: \"kubernetes.io/projected/49f7d7ee-64a4-4ce9-91ec-a76be0cdd249-kube-api-access-bxqgd\") pod \"keystone-operator-controller-manager-7b4567c7cf-r9w5w\" (UID: \"49f7d7ee-64a4-4ce9-91ec-a76be0cdd249\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-r9w5w" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.511044 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5d5br\" (UniqueName: \"kubernetes.io/projected/4f45a249-50b4-466c-a54b-9205e5a127e7-kube-api-access-5d5br\") pod \"nova-operator-controller-manager-79556f57fc-nv4bx\" (UID: \"4f45a249-50b4-466c-a54b-9205e5a127e7\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-nv4bx" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.511121 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrdtt\" (UniqueName: \"kubernetes.io/projected/a4e0aa4d-510c-4880-84fd-998e7527e41d-kube-api-access-rrdtt\") pod \"neutron-operator-controller-manager-6fdcddb789-c9dqn\" (UID: \"a4e0aa4d-510c-4880-84fd-998e7527e41d\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-c9dqn" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.512349 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-x4dfd" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.525207 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-4wtvs" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.546363 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5d5br\" (UniqueName: \"kubernetes.io/projected/4f45a249-50b4-466c-a54b-9205e5a127e7-kube-api-access-5d5br\") pod \"nova-operator-controller-manager-79556f57fc-nv4bx\" (UID: \"4f45a249-50b4-466c-a54b-9205e5a127e7\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-nv4bx" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.546582 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrdtt\" (UniqueName: \"kubernetes.io/projected/a4e0aa4d-510c-4880-84fd-998e7527e41d-kube-api-access-rrdtt\") pod \"neutron-operator-controller-manager-6fdcddb789-c9dqn\" (UID: \"a4e0aa4d-510c-4880-84fd-998e7527e41d\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-c9dqn" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.549400 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-r9w5w" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.557897 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.562724 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.574672 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.589870 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-ngcdx"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.591201 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-ngcdx" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.613356 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-ngcdx"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.615283 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvfl2\" (UniqueName: \"kubernetes.io/projected/78748478-834d-4797-b214-b72206253e23-kube-api-access-wvfl2\") pod \"octavia-operator-controller-manager-64cdc6ff96-zzqvn\" (UID: \"78748478-834d-4797-b214-b72206253e23\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-zzqvn" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.615497 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lvh7n\" (UniqueName: \"kubernetes.io/projected/faeedd17-af99-4393-bf5c-ac5cc3b2d7b5-kube-api-access-lvh7n\") pod \"ovn-operator-controller-manager-56897c768d-7lsxw\" (UID: \"faeedd17-af99-4393-bf5c-ac5cc3b2d7b5\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-7lsxw" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.618994 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-64gmq"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.620173 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-64gmq" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.631022 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-64gmq"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.631369 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-nv4bx" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.635967 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-dlmmw"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.637323 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-dlmmw" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.641820 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-nwrtk"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.643258 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-nwrtk" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.647980 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-ggrfl"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.648676 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-skmpz" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.648865 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-h4vvj" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.648880 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.648956 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-pfgw4" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.648989 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-8ct8b" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.649101 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-r8bgq" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.649161 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-ggrfl" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.653591 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-h6vss" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.654427 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-dlmmw"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.676232 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-nwrtk"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.707809 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-ggrfl"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.716859 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d437c518-be55-44c0-b374-5c3d2d62b49a-cert\") pod \"infra-operator-controller-manager-57548d458d-qb9wz\" (UID: \"d437c518-be55-44c0-b374-5c3d2d62b49a\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-qb9wz" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.716915 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lvh7n\" (UniqueName: \"kubernetes.io/projected/faeedd17-af99-4393-bf5c-ac5cc3b2d7b5-kube-api-access-lvh7n\") pod \"ovn-operator-controller-manager-56897c768d-7lsxw\" (UID: \"faeedd17-af99-4393-bf5c-ac5cc3b2d7b5\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-7lsxw" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.716947 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvfl2\" (UniqueName: \"kubernetes.io/projected/78748478-834d-4797-b214-b72206253e23-kube-api-access-wvfl2\") pod \"octavia-operator-controller-manager-64cdc6ff96-zzqvn\" (UID: \"78748478-834d-4797-b214-b72206253e23\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-zzqvn" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.716975 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnf8f\" (UniqueName: \"kubernetes.io/projected/f39d1519-87df-476d-b47a-8b2857c23843-kube-api-access-pnf8f\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm\" (UID: \"f39d1519-87df-476d-b47a-8b2857c23843\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.716999 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f39d1519-87df-476d-b47a-8b2857c23843-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm\" (UID: \"f39d1519-87df-476d-b47a-8b2857c23843\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.717015 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6s65x\" (UniqueName: \"kubernetes.io/projected/90c064f2-ec25-44c8-ab5a-17fdb307cfe6-kube-api-access-6s65x\") pod 
\"placement-operator-controller-manager-57988cc5b5-ngcdx\" (UID: \"90c064f2-ec25-44c8-ab5a-17fdb307cfe6\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-ngcdx" Nov 28 13:37:19 crc kubenswrapper[4857]: E1128 13:37:19.717146 4857 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 13:37:19 crc kubenswrapper[4857]: E1128 13:37:19.717184 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d437c518-be55-44c0-b374-5c3d2d62b49a-cert podName:d437c518-be55-44c0-b374-5c3d2d62b49a nodeName:}" failed. No retries permitted until 2025-11-28 13:37:20.71717044 +0000 UTC m=+1132.744545607 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d437c518-be55-44c0-b374-5c3d2d62b49a-cert") pod "infra-operator-controller-manager-57548d458d-qb9wz" (UID: "d437c518-be55-44c0-b374-5c3d2d62b49a") : secret "infra-operator-webhook-server-cert" not found Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.726040 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.728224 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.731398 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.731569 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.732613 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.732694 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-857tv" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.741251 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvfl2\" (UniqueName: \"kubernetes.io/projected/78748478-834d-4797-b214-b72206253e23-kube-api-access-wvfl2\") pod \"octavia-operator-controller-manager-64cdc6ff96-zzqvn\" (UID: \"78748478-834d-4797-b214-b72206253e23\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-zzqvn" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.753467 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b9nj2"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.754407 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b9nj2" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.759713 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lvh7n\" (UniqueName: \"kubernetes.io/projected/faeedd17-af99-4393-bf5c-ac5cc3b2d7b5-kube-api-access-lvh7n\") pod \"ovn-operator-controller-manager-56897c768d-7lsxw\" (UID: \"faeedd17-af99-4393-bf5c-ac5cc3b2d7b5\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-7lsxw" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.760152 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-8dwm8" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.773344 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b9nj2"] Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.818198 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9wjg\" (UniqueName: \"kubernetes.io/projected/de166f90-9b9c-49b3-b12b-0e36ae5db4da-kube-api-access-q9wjg\") pod \"telemetry-operator-controller-manager-76cc84c6bb-nwrtk\" (UID: \"de166f90-9b9c-49b3-b12b-0e36ae5db4da\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-nwrtk" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.818281 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgkx5\" (UniqueName: \"kubernetes.io/projected/5c39c627-3379-4971-8d55-48bede6d34ec-kube-api-access-mgkx5\") pod \"watcher-operator-controller-manager-656dcb59d4-ggrfl\" (UID: \"5c39c627-3379-4971-8d55-48bede6d34ec\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-ggrfl" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.818338 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hk7w\" (UniqueName: \"kubernetes.io/projected/729a83b9-4e01-4943-abed-58960ed40e68-kube-api-access-6hk7w\") pod \"swift-operator-controller-manager-d77b94747-64gmq\" (UID: \"729a83b9-4e01-4943-abed-58960ed40e68\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-64gmq" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.818382 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnf8f\" (UniqueName: \"kubernetes.io/projected/f39d1519-87df-476d-b47a-8b2857c23843-kube-api-access-pnf8f\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm\" (UID: \"f39d1519-87df-476d-b47a-8b2857c23843\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.818403 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f39d1519-87df-476d-b47a-8b2857c23843-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm\" (UID: \"f39d1519-87df-476d-b47a-8b2857c23843\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.818421 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6s65x\" (UniqueName: 
\"kubernetes.io/projected/90c064f2-ec25-44c8-ab5a-17fdb307cfe6-kube-api-access-6s65x\") pod \"placement-operator-controller-manager-57988cc5b5-ngcdx\" (UID: \"90c064f2-ec25-44c8-ab5a-17fdb307cfe6\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-ngcdx" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.818442 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dxtqd\" (UniqueName: \"kubernetes.io/projected/af0c9c82-73f4-4bff-b0f6-a94c0d6e731a-kube-api-access-dxtqd\") pod \"test-operator-controller-manager-5cd6c7f4c8-dlmmw\" (UID: \"af0c9c82-73f4-4bff-b0f6-a94c0d6e731a\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-dlmmw" Nov 28 13:37:19 crc kubenswrapper[4857]: E1128 13:37:19.818560 4857 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 13:37:19 crc kubenswrapper[4857]: E1128 13:37:19.818594 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f39d1519-87df-476d-b47a-8b2857c23843-cert podName:f39d1519-87df-476d-b47a-8b2857c23843 nodeName:}" failed. No retries permitted until 2025-11-28 13:37:20.31858194 +0000 UTC m=+1132.345957097 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/f39d1519-87df-476d-b47a-8b2857c23843-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" (UID: "f39d1519-87df-476d-b47a-8b2857c23843") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.823297 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-c9dqn" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.828074 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-7lsxw" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.844815 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6s65x\" (UniqueName: \"kubernetes.io/projected/90c064f2-ec25-44c8-ab5a-17fdb307cfe6-kube-api-access-6s65x\") pod \"placement-operator-controller-manager-57988cc5b5-ngcdx\" (UID: \"90c064f2-ec25-44c8-ab5a-17fdb307cfe6\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-ngcdx" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.845419 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnf8f\" (UniqueName: \"kubernetes.io/projected/f39d1519-87df-476d-b47a-8b2857c23843-kube-api-access-pnf8f\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm\" (UID: \"f39d1519-87df-476d-b47a-8b2857c23843\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.921427 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vnzf4\" (UniqueName: \"kubernetes.io/projected/340e937e-6fd3-4fd4-829e-2ac5972542b7-kube-api-access-vnzf4\") pod \"openstack-operator-controller-manager-888bbc64f-m879m\" (UID: \"340e937e-6fd3-4fd4-829e-2ac5972542b7\") " pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.921964 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-metrics-certs\") pod \"openstack-operator-controller-manager-888bbc64f-m879m\" (UID: \"340e937e-6fd3-4fd4-829e-2ac5972542b7\") " pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.922009 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9wjg\" (UniqueName: \"kubernetes.io/projected/de166f90-9b9c-49b3-b12b-0e36ae5db4da-kube-api-access-q9wjg\") pod \"telemetry-operator-controller-manager-76cc84c6bb-nwrtk\" (UID: \"de166f90-9b9c-49b3-b12b-0e36ae5db4da\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-nwrtk" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.922029 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgxg7\" (UniqueName: \"kubernetes.io/projected/5d82c76b-a0e1-4001-8676-390818e9edaf-kube-api-access-vgxg7\") pod \"rabbitmq-cluster-operator-manager-668c99d594-b9nj2\" (UID: \"5d82c76b-a0e1-4001-8676-390818e9edaf\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b9nj2" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.922140 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgkx5\" (UniqueName: \"kubernetes.io/projected/5c39c627-3379-4971-8d55-48bede6d34ec-kube-api-access-mgkx5\") pod \"watcher-operator-controller-manager-656dcb59d4-ggrfl\" (UID: \"5c39c627-3379-4971-8d55-48bede6d34ec\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-ggrfl" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.922178 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"webhook-certs\" (UniqueName: \"kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-webhook-certs\") pod \"openstack-operator-controller-manager-888bbc64f-m879m\" (UID: \"340e937e-6fd3-4fd4-829e-2ac5972542b7\") " pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.922206 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hk7w\" (UniqueName: \"kubernetes.io/projected/729a83b9-4e01-4943-abed-58960ed40e68-kube-api-access-6hk7w\") pod \"swift-operator-controller-manager-d77b94747-64gmq\" (UID: \"729a83b9-4e01-4943-abed-58960ed40e68\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-64gmq" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.922248 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dxtqd\" (UniqueName: \"kubernetes.io/projected/af0c9c82-73f4-4bff-b0f6-a94c0d6e731a-kube-api-access-dxtqd\") pod \"test-operator-controller-manager-5cd6c7f4c8-dlmmw\" (UID: \"af0c9c82-73f4-4bff-b0f6-a94c0d6e731a\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-dlmmw" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.924694 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-fp6fc"] Nov 28 13:37:19 crc kubenswrapper[4857]: W1128 13:37:19.935072 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod41a3b8e7_e61b_45fc_a87e_99e2d943fd15.slice/crio-c330577b3e91097b437005e9a16866dd4cff55bb40d4d2f8ceb2f7b2bea5d33b WatchSource:0}: Error finding container c330577b3e91097b437005e9a16866dd4cff55bb40d4d2f8ceb2f7b2bea5d33b: Status 404 returned error can't find the container with id c330577b3e91097b437005e9a16866dd4cff55bb40d4d2f8ceb2f7b2bea5d33b Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.947046 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9wjg\" (UniqueName: \"kubernetes.io/projected/de166f90-9b9c-49b3-b12b-0e36ae5db4da-kube-api-access-q9wjg\") pod \"telemetry-operator-controller-manager-76cc84c6bb-nwrtk\" (UID: \"de166f90-9b9c-49b3-b12b-0e36ae5db4da\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-nwrtk" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.952864 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dxtqd\" (UniqueName: \"kubernetes.io/projected/af0c9c82-73f4-4bff-b0f6-a94c0d6e731a-kube-api-access-dxtqd\") pod \"test-operator-controller-manager-5cd6c7f4c8-dlmmw\" (UID: \"af0c9c82-73f4-4bff-b0f6-a94c0d6e731a\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-dlmmw" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.964275 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgkx5\" (UniqueName: \"kubernetes.io/projected/5c39c627-3379-4971-8d55-48bede6d34ec-kube-api-access-mgkx5\") pod \"watcher-operator-controller-manager-656dcb59d4-ggrfl\" (UID: \"5c39c627-3379-4971-8d55-48bede6d34ec\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-ggrfl" Nov 28 13:37:19 crc kubenswrapper[4857]: I1128 13:37:19.974809 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hk7w\" (UniqueName: 
\"kubernetes.io/projected/729a83b9-4e01-4943-abed-58960ed40e68-kube-api-access-6hk7w\") pod \"swift-operator-controller-manager-d77b94747-64gmq\" (UID: \"729a83b9-4e01-4943-abed-58960ed40e68\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-64gmq" Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.023341 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vnzf4\" (UniqueName: \"kubernetes.io/projected/340e937e-6fd3-4fd4-829e-2ac5972542b7-kube-api-access-vnzf4\") pod \"openstack-operator-controller-manager-888bbc64f-m879m\" (UID: \"340e937e-6fd3-4fd4-829e-2ac5972542b7\") " pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.023409 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-metrics-certs\") pod \"openstack-operator-controller-manager-888bbc64f-m879m\" (UID: \"340e937e-6fd3-4fd4-829e-2ac5972542b7\") " pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.023439 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgxg7\" (UniqueName: \"kubernetes.io/projected/5d82c76b-a0e1-4001-8676-390818e9edaf-kube-api-access-vgxg7\") pod \"rabbitmq-cluster-operator-manager-668c99d594-b9nj2\" (UID: \"5d82c76b-a0e1-4001-8676-390818e9edaf\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b9nj2" Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.023527 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-webhook-certs\") pod \"openstack-operator-controller-manager-888bbc64f-m879m\" (UID: \"340e937e-6fd3-4fd4-829e-2ac5972542b7\") " pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" Nov 28 13:37:20 crc kubenswrapper[4857]: E1128 13:37:20.023654 4857 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 13:37:20 crc kubenswrapper[4857]: E1128 13:37:20.023709 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-webhook-certs podName:340e937e-6fd3-4fd4-829e-2ac5972542b7 nodeName:}" failed. No retries permitted until 2025-11-28 13:37:20.523690265 +0000 UTC m=+1132.551065432 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-webhook-certs") pod "openstack-operator-controller-manager-888bbc64f-m879m" (UID: "340e937e-6fd3-4fd4-829e-2ac5972542b7") : secret "webhook-server-cert" not found Nov 28 13:37:20 crc kubenswrapper[4857]: E1128 13:37:20.024329 4857 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 13:37:20 crc kubenswrapper[4857]: E1128 13:37:20.024366 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-metrics-certs podName:340e937e-6fd3-4fd4-829e-2ac5972542b7 nodeName:}" failed. No retries permitted until 2025-11-28 13:37:20.524354974 +0000 UTC m=+1132.551730141 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-metrics-certs") pod "openstack-operator-controller-manager-888bbc64f-m879m" (UID: "340e937e-6fd3-4fd4-829e-2ac5972542b7") : secret "metrics-server-cert" not found Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.029118 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-zzqvn" Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.039615 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-ngcdx" Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.049802 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgxg7\" (UniqueName: \"kubernetes.io/projected/5d82c76b-a0e1-4001-8676-390818e9edaf-kube-api-access-vgxg7\") pod \"rabbitmq-cluster-operator-manager-668c99d594-b9nj2\" (UID: \"5d82c76b-a0e1-4001-8676-390818e9edaf\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b9nj2" Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.053587 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vnzf4\" (UniqueName: \"kubernetes.io/projected/340e937e-6fd3-4fd4-829e-2ac5972542b7-kube-api-access-vnzf4\") pod \"openstack-operator-controller-manager-888bbc64f-m879m\" (UID: \"340e937e-6fd3-4fd4-829e-2ac5972542b7\") " pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.147542 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-wkfb4"] Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.172390 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-fp6fc" event={"ID":"41a3b8e7-e61b-45fc-a87e-99e2d943fd15","Type":"ContainerStarted","Data":"c330577b3e91097b437005e9a16866dd4cff55bb40d4d2f8ceb2f7b2bea5d33b"} Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.199513 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-64gmq" Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.216089 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-dlmmw" Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.244371 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-nwrtk" Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.263710 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-ggrfl" Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.304628 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b9nj2" Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.330144 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f39d1519-87df-476d-b47a-8b2857c23843-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm\" (UID: \"f39d1519-87df-476d-b47a-8b2857c23843\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" Nov 28 13:37:20 crc kubenswrapper[4857]: E1128 13:37:20.330313 4857 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 13:37:20 crc kubenswrapper[4857]: E1128 13:37:20.330363 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f39d1519-87df-476d-b47a-8b2857c23843-cert podName:f39d1519-87df-476d-b47a-8b2857c23843 nodeName:}" failed. No retries permitted until 2025-11-28 13:37:21.330349023 +0000 UTC m=+1133.357724180 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/f39d1519-87df-476d-b47a-8b2857c23843-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" (UID: "f39d1519-87df-476d-b47a-8b2857c23843") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.529885 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-x4dfd"] Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.534674 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-metrics-certs\") pod \"openstack-operator-controller-manager-888bbc64f-m879m\" (UID: \"340e937e-6fd3-4fd4-829e-2ac5972542b7\") " pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.534769 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-webhook-certs\") pod \"openstack-operator-controller-manager-888bbc64f-m879m\" (UID: \"340e937e-6fd3-4fd4-829e-2ac5972542b7\") " pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" Nov 28 13:37:20 crc kubenswrapper[4857]: E1128 13:37:20.534878 4857 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 13:37:20 crc kubenswrapper[4857]: E1128 13:37:20.534926 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-webhook-certs podName:340e937e-6fd3-4fd4-829e-2ac5972542b7 nodeName:}" failed. No retries permitted until 2025-11-28 13:37:21.534912272 +0000 UTC m=+1133.562287439 (durationBeforeRetry 1s). 
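The MountVolume.SetUp failures in these entries follow a doubling retry schedule: durationBeforeRetry grows from 500ms to 1s to 2s on successive attempts and keeps doubling until the referenced secret (infra-operator-webhook-server-cert, webhook-server-cert, metrics-server-cert, openstack-baremetal-operator-webhook-server-cert) finally exists. Below is a minimal sketch of that schedule, assuming the k8s.io/apimachinery wait package; it illustrates the doubling behavior only and is not the kubelet's actual volume-manager code path.

    package main

    import (
        "fmt"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    // secretExists is a hypothetical stand-in for the API lookup the kubelet
    // performs; in the log it keeps failing with
    // `secret "webhook-server-cert" not found`.
    func secretExists() bool { return false }

    func main() {
        // Doubling schedule matching the durationBeforeRetry values in the
        // log: 500ms, then 1s, then 2s, and so on for a fixed number of steps.
        backoff := wait.Backoff{
            Duration: 500 * time.Millisecond, // first retry delay
            Factor:   2.0,                    // double after each failure
            Steps:    5,                      // stop illustrating after five tries
        }
        err := wait.ExponentialBackoff(backoff, func() (bool, error) {
            if secretExists() {
                return true, nil // secret present: the mount can proceed
            }
            return false, nil // not yet: retry after a doubled delay
        })
        fmt.Println("gave up:", err) // wait.ErrWaitTimeout once Steps are exhausted
    }

In the log the loop only ends when the operator's cert secret is created out of band, at which point the next retry mounts the volume successfully.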
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-webhook-certs") pod "openstack-operator-controller-manager-888bbc64f-m879m" (UID: "340e937e-6fd3-4fd4-829e-2ac5972542b7") : secret "webhook-server-cert" not found Nov 28 13:37:20 crc kubenswrapper[4857]: E1128 13:37:20.534879 4857 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 13:37:20 crc kubenswrapper[4857]: E1128 13:37:20.534993 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-metrics-certs podName:340e937e-6fd3-4fd4-829e-2ac5972542b7 nodeName:}" failed. No retries permitted until 2025-11-28 13:37:21.534982564 +0000 UTC m=+1133.562357731 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-metrics-certs") pod "openstack-operator-controller-manager-888bbc64f-m879m" (UID: "340e937e-6fd3-4fd4-829e-2ac5972542b7") : secret "metrics-server-cert" not found Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.541427 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-hg5f2"] Nov 28 13:37:20 crc kubenswrapper[4857]: W1128 13:37:20.550864 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod968d6179_1a75_405c_97cc_cad775d59e28.slice/crio-3ddac37762109a2b26c7a7d8b457de0d2e520bc3582ed25fd11e7d859378d645 WatchSource:0}: Error finding container 3ddac37762109a2b26c7a7d8b457de0d2e520bc3582ed25fd11e7d859378d645: Status 404 returned error can't find the container with id 3ddac37762109a2b26c7a7d8b457de0d2e520bc3582ed25fd11e7d859378d645 Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.563069 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-4gn9q"] Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.643995 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-4wtvs"] Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.657917 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-cwbkd"] Nov 28 13:37:20 crc kubenswrapper[4857]: W1128 13:37:20.659878 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod502d3010_ae32_4e6f_a7bf_614cd1da9dda.slice/crio-d793823960da6b0b702a87b8a353d92a3d2e54e027bc86e57129aac408acdd90 WatchSource:0}: Error finding container d793823960da6b0b702a87b8a353d92a3d2e54e027bc86e57129aac408acdd90: Status 404 returned error can't find the container with id d793823960da6b0b702a87b8a353d92a3d2e54e027bc86e57129aac408acdd90 Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.663351 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-r9w5w"] Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.689598 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-msp7g"] Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.737074 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"cert\" (UniqueName: \"kubernetes.io/secret/d437c518-be55-44c0-b374-5c3d2d62b49a-cert\") pod \"infra-operator-controller-manager-57548d458d-qb9wz\" (UID: \"d437c518-be55-44c0-b374-5c3d2d62b49a\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-qb9wz" Nov 28 13:37:20 crc kubenswrapper[4857]: E1128 13:37:20.737217 4857 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 13:37:20 crc kubenswrapper[4857]: E1128 13:37:20.737265 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d437c518-be55-44c0-b374-5c3d2d62b49a-cert podName:d437c518-be55-44c0-b374-5c3d2d62b49a nodeName:}" failed. No retries permitted until 2025-11-28 13:37:22.737250486 +0000 UTC m=+1134.764625653 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d437c518-be55-44c0-b374-5c3d2d62b49a-cert") pod "infra-operator-controller-manager-57548d458d-qb9wz" (UID: "d437c518-be55-44c0-b374-5c3d2d62b49a") : secret "infra-operator-webhook-server-cert" not found Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.776857 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-fr6n9"] Nov 28 13:37:20 crc kubenswrapper[4857]: W1128 13:37:20.782140 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfaeedd17_af99_4393_bf5c_ac5cc3b2d7b5.slice/crio-40ba67928b94be8b38f43a84c41e067fe1960fcdc9aecfa312b6a3ad292ba42d WatchSource:0}: Error finding container 40ba67928b94be8b38f43a84c41e067fe1960fcdc9aecfa312b6a3ad292ba42d: Status 404 returned error can't find the container with id 40ba67928b94be8b38f43a84c41e067fe1960fcdc9aecfa312b6a3ad292ba42d Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.788451 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-7lsxw"] Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.802541 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-nv4bx"] Nov 28 13:37:20 crc kubenswrapper[4857]: W1128 13:37:20.808265 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4f45a249_50b4_466c_a54b_9205e5a127e7.slice/crio-ad9abe18a31cd9f2c62a144b31c624bcd5537cec7646be4c92ff75c6df825f85 WatchSource:0}: Error finding container ad9abe18a31cd9f2c62a144b31c624bcd5537cec7646be4c92ff75c6df825f85: Status 404 returned error can't find the container with id ad9abe18a31cd9f2c62a144b31c624bcd5537cec7646be4c92ff75c6df825f85 Nov 28 13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.814088 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-c9dqn"] Nov 28 13:37:20 crc kubenswrapper[4857]: W1128 13:37:20.821282 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda4e0aa4d_510c_4880_84fd_998e7527e41d.slice/crio-c8c51401531332b3ceb4bb191e555d4d08bb3082b13d9018349b3167c8c0b03d WatchSource:0}: Error finding container c8c51401531332b3ceb4bb191e555d4d08bb3082b13d9018349b3167c8c0b03d: Status 404 returned error can't find the container with id c8c51401531332b3ceb4bb191e555d4d08bb3082b13d9018349b3167c8c0b03d Nov 28 
13:37:20 crc kubenswrapper[4857]: I1128 13:37:20.993885 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-ngcdx"] Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.004994 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-zzqvn"] Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.014002 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-64gmq"] Nov 28 13:37:21 crc kubenswrapper[4857]: W1128 13:37:21.018785 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod729a83b9_4e01_4943_abed_58960ed40e68.slice/crio-0b1896150b0152bdb4643dff13dd809d798e1012fa65d11e60ffc0aa3749efe3 WatchSource:0}: Error finding container 0b1896150b0152bdb4643dff13dd809d798e1012fa65d11e60ffc0aa3749efe3: Status 404 returned error can't find the container with id 0b1896150b0152bdb4643dff13dd809d798e1012fa65d11e60ffc0aa3749efe3 Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.021617 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:225958f250a1075b69439d776a13acc45c78695c21abda23600fb53ca1640423,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6s65x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
placement-operator-controller-manager-57988cc5b5-ngcdx_openstack-operators(90c064f2-ec25-44c8-ab5a-17fdb307cfe6): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.024575 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6hk7w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-d77b94747-64gmq_openstack-operators(729a83b9-4e01-4943-abed-58960ed40e68): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.024600 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6s65x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-57988cc5b5-ngcdx_openstack-operators(90c064f2-ec25-44c8-ab5a-17fdb307cfe6): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.025952 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-ngcdx" podUID="90c064f2-ec25-44c8-ab5a-17fdb307cfe6" Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.026880 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6hk7w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-d77b94747-64gmq_openstack-operators(729a83b9-4e01-4943-abed-58960ed40e68): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.028033 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" 
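The "pull QPS exceeded" errors repeated through these entries are the kubelet's own client-side rate limit on image pulls, governed by the registryPullQPS and registryBurst kubelet settings (5 pulls/s with a burst of 10 by default), not a registry-side rejection: with a few dozen operator images requested in the same second, the token bucket drains and the remaining pulls fail immediately. A minimal sketch of that behavior follows, assuming client-go's flowcontrol token bucket; it is illustrative only, not the kubelet's image manager.

    package main

    import (
        "fmt"

        "k8s.io/client-go/util/flowcontrol"
    )

    func main() {
        // 5 pulls per second with a burst of 10 tokens, the documented
        // kubelet defaults for registryPullQPS / registryBurst.
        limiter := flowcontrol.NewTokenBucketRateLimiter(5, 10)

        // Simulate ~20 operator images requested in the same instant, as in
        // the log: the first burst is admitted, the rest are rejected the
        // way the kubelet reports ErrImagePull: "pull QPS exceeded".
        for i := 1; i <= 20; i++ {
            if limiter.TryAccept() {
                fmt.Printf("pull %2d: admitted\n", i)
            } else {
                fmt.Printf("pull %2d: pull QPS exceeded\n", i)
            }
        }
    }

This also explains why the failures hit many pods at once: the operators were all scheduled in the same sync window, so they compete for the same bucket.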
pod="openstack-operators/swift-operator-controller-manager-d77b94747-64gmq" podUID="729a83b9-4e01-4943-abed-58960ed40e68" Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.080445 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-dlmmw"] Nov 28 13:37:21 crc kubenswrapper[4857]: W1128 13:37:21.085960 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5d82c76b_a0e1_4001_8676_390818e9edaf.slice/crio-a59617fe945134478570379ebbe2611e82c52e4cfe67316d0bfc283a0754d003 WatchSource:0}: Error finding container a59617fe945134478570379ebbe2611e82c52e4cfe67316d0bfc283a0754d003: Status 404 returned error can't find the container with id a59617fe945134478570379ebbe2611e82c52e4cfe67316d0bfc283a0754d003 Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.087014 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b9nj2"] Nov 28 13:37:21 crc kubenswrapper[4857]: W1128 13:37:21.087945 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaf0c9c82_73f4_4bff_b0f6_a94c0d6e731a.slice/crio-9dda87c59fe849bf3d3be91178979d4cb726a89fc97f83d8aac57012daafc1b5 WatchSource:0}: Error finding container 9dda87c59fe849bf3d3be91178979d4cb726a89fc97f83d8aac57012daafc1b5: Status 404 returned error can't find the container with id 9dda87c59fe849bf3d3be91178979d4cb726a89fc97f83d8aac57012daafc1b5 Nov 28 13:37:21 crc kubenswrapper[4857]: W1128 13:37:21.088900 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podde166f90_9b9c_49b3_b12b_0e36ae5db4da.slice/crio-895992579b4a008e83fac1b074b254834d2d0fe087208a70e627f67dd9925c45 WatchSource:0}: Error finding container 895992579b4a008e83fac1b074b254834d2d0fe087208a70e627f67dd9925c45: Status 404 returned error can't find the container with id 895992579b4a008e83fac1b074b254834d2d0fe087208a70e627f67dd9925c45 Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.092132 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-q9wjg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-nwrtk_openstack-operators(de166f90-9b9c-49b3-b12b-0e36ae5db4da): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.093049 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dxtqd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cd6c7f4c8-dlmmw_openstack-operators(af0c9c82-73f4-4bff-b0f6-a94c0d6e731a): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.093247 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vgxg7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-b9nj2_openstack-operators(5d82c76b-a0e1-4001-8676-390818e9edaf): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.093706 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true 
--v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-q9wjg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-nwrtk_openstack-operators(de166f90-9b9c-49b3-b12b-0e36ae5db4da): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.094969 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-nwrtk" podUID="de166f90-9b9c-49b3-b12b-0e36ae5db4da" Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.095016 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b9nj2" podUID="5d82c76b-a0e1-4001-8676-390818e9edaf" Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.098419 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dxtqd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cd6c7f4c8-dlmmw_openstack-operators(af0c9c82-73f4-4bff-b0f6-a94c0d6e731a): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.099584 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-dlmmw" podUID="af0c9c82-73f4-4bff-b0f6-a94c0d6e731a" Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.105770 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-ggrfl"] Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.112128 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mgkx5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-656dcb59d4-ggrfl_openstack-operators(5c39c627-3379-4971-8d55-48bede6d34ec): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.114249 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mgkx5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-656dcb59d4-ggrfl_openstack-operators(5c39c627-3379-4971-8d55-48bede6d34ec): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.115836 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-ggrfl" podUID="5c39c627-3379-4971-8d55-48bede6d34ec" Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.116128 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-nwrtk"] Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.180949 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-64gmq" 
event={"ID":"729a83b9-4e01-4943-abed-58960ed40e68","Type":"ContainerStarted","Data":"0b1896150b0152bdb4643dff13dd809d798e1012fa65d11e60ffc0aa3749efe3"} Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.182656 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-ngcdx" event={"ID":"90c064f2-ec25-44c8-ab5a-17fdb307cfe6","Type":"ContainerStarted","Data":"c42764efc4097a3b4f5cf0e0a929a3bee8e89048d0823540ca232339d5198cce"} Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.184581 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-c9dqn" event={"ID":"a4e0aa4d-510c-4880-84fd-998e7527e41d","Type":"ContainerStarted","Data":"c8c51401531332b3ceb4bb191e555d4d08bb3082b13d9018349b3167c8c0b03d"} Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.184603 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-d77b94747-64gmq" podUID="729a83b9-4e01-4943-abed-58960ed40e68" Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.185846 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-x4dfd" event={"ID":"968d6179-1a75-405c-97cc-cad775d59e28","Type":"ContainerStarted","Data":"3ddac37762109a2b26c7a7d8b457de0d2e520bc3582ed25fd11e7d859378d645"} Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.186938 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-msp7g" event={"ID":"8ba8130c-f7e1-4cc0-8427-5a13997138ce","Type":"ContainerStarted","Data":"3cd626c490c8a2ce99f93bf968c07325869ae49d74f4ff502bb79f7b9363b9de"} Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.187279 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:225958f250a1075b69439d776a13acc45c78695c21abda23600fb53ca1640423\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-ngcdx" podUID="90c064f2-ec25-44c8-ab5a-17fdb307cfe6" Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.188449 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-nwrtk" event={"ID":"de166f90-9b9c-49b3-b12b-0e36ae5db4da","Type":"ContainerStarted","Data":"895992579b4a008e83fac1b074b254834d2d0fe087208a70e627f67dd9925c45"} Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.191729 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385\\\"\", failed to 
\"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-nwrtk" podUID="de166f90-9b9c-49b3-b12b-0e36ae5db4da" Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.193502 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-ggrfl" event={"ID":"5c39c627-3379-4971-8d55-48bede6d34ec","Type":"ContainerStarted","Data":"49306d7d029dec9f5b8e35cbb1c64921db979ade6cd4e39a869e5a83eeb2985d"} Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.197581 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-ggrfl" podUID="5c39c627-3379-4971-8d55-48bede6d34ec" Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.206220 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-hg5f2" event={"ID":"cb4c2469-9178-463d-a9be-700af973c9b8","Type":"ContainerStarted","Data":"24c7cadb8a5775b5ef5012c29cac27f2cbd797e1e3cd1dd29bc561a513ec531b"} Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.208354 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-7lsxw" event={"ID":"faeedd17-af99-4393-bf5c-ac5cc3b2d7b5","Type":"ContainerStarted","Data":"40ba67928b94be8b38f43a84c41e067fe1960fcdc9aecfa312b6a3ad292ba42d"} Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.212400 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-dlmmw" event={"ID":"af0c9c82-73f4-4bff-b0f6-a94c0d6e731a","Type":"ContainerStarted","Data":"9dda87c59fe849bf3d3be91178979d4cb726a89fc97f83d8aac57012daafc1b5"} Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.218054 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-dlmmw" podUID="af0c9c82-73f4-4bff-b0f6-a94c0d6e731a" Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.218067 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-zzqvn" event={"ID":"78748478-834d-4797-b214-b72206253e23","Type":"ContainerStarted","Data":"914158e8369082c110101c5fcacf6275006f0d0f1cb7a8654cef3e090adcaaff"} Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.220345 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-nv4bx" 
event={"ID":"4f45a249-50b4-466c-a54b-9205e5a127e7","Type":"ContainerStarted","Data":"ad9abe18a31cd9f2c62a144b31c624bcd5537cec7646be4c92ff75c6df825f85"} Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.222211 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b9nj2" event={"ID":"5d82c76b-a0e1-4001-8676-390818e9edaf","Type":"ContainerStarted","Data":"a59617fe945134478570379ebbe2611e82c52e4cfe67316d0bfc283a0754d003"} Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.223738 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b9nj2" podUID="5d82c76b-a0e1-4001-8676-390818e9edaf" Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.232269 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-cwbkd" event={"ID":"b35bb4aa-b164-47fe-85bd-8f34b7e55e5e","Type":"ContainerStarted","Data":"42c569dffc769ecbcfff28b0f7393c44a60c1c6fec80a37ee4a0f8f4c64c599a"} Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.233683 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-fr6n9" event={"ID":"42e0b127-fc00-42c7-a7d8-9e5ea55a6590","Type":"ContainerStarted","Data":"46725324ba022152b46be52c344c48da61848da41887a101d38c4c7c8696e813"} Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.235138 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-wkfb4" event={"ID":"e4010942-0dec-4e3e-8f52-f69abf7ace10","Type":"ContainerStarted","Data":"6cff5fcb83f64b42e5ff1c6e4e9a01cce09e682d983634478b8965741d1c0991"} Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.237500 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-4wtvs" event={"ID":"502d3010-ae32-4e6f-a7bf-614cd1da9dda","Type":"ContainerStarted","Data":"d793823960da6b0b702a87b8a353d92a3d2e54e027bc86e57129aac408acdd90"} Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.238960 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-r9w5w" event={"ID":"49f7d7ee-64a4-4ce9-91ec-a76be0cdd249","Type":"ContainerStarted","Data":"04447d22cdb2f990f940c738ebe15142e1d4bdd7f820b2050f4a8f8d778fdbbb"} Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.244472 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-4gn9q" event={"ID":"3c9f0811-92a3-4681-b71e-28d474c3751e","Type":"ContainerStarted","Data":"4fe88604b0ad84786ed3c79b3ede47b6d142166e0657ca5e765cfabeddcd36ab"} Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.348076 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f39d1519-87df-476d-b47a-8b2857c23843-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm\" (UID: \"f39d1519-87df-476d-b47a-8b2857c23843\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" Nov 28 13:37:21 crc 
kubenswrapper[4857]: E1128 13:37:21.348616 4857 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.348959 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f39d1519-87df-476d-b47a-8b2857c23843-cert podName:f39d1519-87df-476d-b47a-8b2857c23843 nodeName:}" failed. No retries permitted until 2025-11-28 13:37:23.348938435 +0000 UTC m=+1135.376313602 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/f39d1519-87df-476d-b47a-8b2857c23843-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" (UID: "f39d1519-87df-476d-b47a-8b2857c23843") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.551662 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-webhook-certs\") pod \"openstack-operator-controller-manager-888bbc64f-m879m\" (UID: \"340e937e-6fd3-4fd4-829e-2ac5972542b7\") " pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.551810 4857 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.551885 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-webhook-certs podName:340e937e-6fd3-4fd4-829e-2ac5972542b7 nodeName:}" failed. No retries permitted until 2025-11-28 13:37:23.551868276 +0000 UTC m=+1135.579243443 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-webhook-certs") pod "openstack-operator-controller-manager-888bbc64f-m879m" (UID: "340e937e-6fd3-4fd4-829e-2ac5972542b7") : secret "webhook-server-cert" not found Nov 28 13:37:21 crc kubenswrapper[4857]: I1128 13:37:21.552104 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-metrics-certs\") pod \"openstack-operator-controller-manager-888bbc64f-m879m\" (UID: \"340e937e-6fd3-4fd4-829e-2ac5972542b7\") " pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.552306 4857 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 13:37:21 crc kubenswrapper[4857]: E1128 13:37:21.552394 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-metrics-certs podName:340e937e-6fd3-4fd4-829e-2ac5972542b7 nodeName:}" failed. No retries permitted until 2025-11-28 13:37:23.552373971 +0000 UTC m=+1135.579749208 (durationBeforeRetry 2s). 
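
The repeated ErrImagePull "pull QPS exceeded" failures recorded above are a client-side kubelet limit, not a registry rejection: image pulls are rate limited by the KubeletConfiguration fields registryPullQPS and registryBurst (documented defaults 5 and 10), so roughly thirty operator pods pulling at once exhaust the budget and fall into ImagePullBackOff until retried. A minimal token-bucket sketch of that behaviour, assuming those defaults; the class and numbers here are illustrative, not kubelet's actual code:

    import time

    class PullLimiter:
        # Token bucket: refills at `qps` tokens/second up to `burst` capacity.
        def __init__(self, qps=5.0, burst=10):
            self.qps, self.burst = qps, float(burst)
            self.tokens = float(burst)
            self.last = time.monotonic()

        def try_pull(self):
            now = time.monotonic()
            self.tokens = min(self.burst, self.tokens + (now - self.last) * self.qps)
            self.last = now
            if self.tokens >= 1.0:
                self.tokens -= 1.0
                return True
            return False  # surfaced in the log as ErrImagePull: "pull QPS exceeded"

    limiter = PullLimiter()
    rejected = sum(not limiter.try_pull() for _ in range(30))
    print(rejected, "of 30 near-simultaneous pulls rejected")  # roughly 20 with defaults
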
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-metrics-certs") pod "openstack-operator-controller-manager-888bbc64f-m879m" (UID: "340e937e-6fd3-4fd4-829e-2ac5972542b7") : secret "metrics-server-cert" not found Nov 28 13:37:22 crc kubenswrapper[4857]: E1128 13:37:22.261781 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b9nj2" podUID="5d82c76b-a0e1-4001-8676-390818e9edaf" Nov 28 13:37:22 crc kubenswrapper[4857]: E1128 13:37:22.263423 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-d77b94747-64gmq" podUID="729a83b9-4e01-4943-abed-58960ed40e68" Nov 28 13:37:22 crc kubenswrapper[4857]: E1128 13:37:22.263433 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-dlmmw" podUID="af0c9c82-73f4-4bff-b0f6-a94c0d6e731a" Nov 28 13:37:22 crc kubenswrapper[4857]: E1128 13:37:22.263469 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-nwrtk" podUID="de166f90-9b9c-49b3-b12b-0e36ae5db4da" Nov 28 13:37:22 crc kubenswrapper[4857]: E1128 13:37:22.263483 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:225958f250a1075b69439d776a13acc45c78695c21abda23600fb53ca1640423\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-ngcdx" podUID="90c064f2-ec25-44c8-ab5a-17fdb307cfe6" Nov 28 13:37:22 crc kubenswrapper[4857]: E1128 13:37:22.263553 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off 
pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-ggrfl" podUID="5c39c627-3379-4971-8d55-48bede6d34ec" Nov 28 13:37:22 crc kubenswrapper[4857]: I1128 13:37:22.783795 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d437c518-be55-44c0-b374-5c3d2d62b49a-cert\") pod \"infra-operator-controller-manager-57548d458d-qb9wz\" (UID: \"d437c518-be55-44c0-b374-5c3d2d62b49a\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-qb9wz" Nov 28 13:37:22 crc kubenswrapper[4857]: E1128 13:37:22.784017 4857 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 13:37:22 crc kubenswrapper[4857]: E1128 13:37:22.784298 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d437c518-be55-44c0-b374-5c3d2d62b49a-cert podName:d437c518-be55-44c0-b374-5c3d2d62b49a nodeName:}" failed. No retries permitted until 2025-11-28 13:37:26.784278887 +0000 UTC m=+1138.811654054 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d437c518-be55-44c0-b374-5c3d2d62b49a-cert") pod "infra-operator-controller-manager-57548d458d-qb9wz" (UID: "d437c518-be55-44c0-b374-5c3d2d62b49a") : secret "infra-operator-webhook-server-cert" not found Nov 28 13:37:23 crc kubenswrapper[4857]: I1128 13:37:23.392783 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f39d1519-87df-476d-b47a-8b2857c23843-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm\" (UID: \"f39d1519-87df-476d-b47a-8b2857c23843\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" Nov 28 13:37:23 crc kubenswrapper[4857]: E1128 13:37:23.393085 4857 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 13:37:23 crc kubenswrapper[4857]: E1128 13:37:23.393140 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f39d1519-87df-476d-b47a-8b2857c23843-cert podName:f39d1519-87df-476d-b47a-8b2857c23843 nodeName:}" failed. No retries permitted until 2025-11-28 13:37:27.393123193 +0000 UTC m=+1139.420498360 (durationBeforeRetry 4s). 
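
The durationBeforeRetry values in the MountVolume failures above and below double on each attempt for the same volume: 2s, then 4s, 8s, and eventually 16s. A sketch of that schedule, assuming a 2s initial delay and a doubling factor; the cap value is an assumption for illustration, not taken from this log:

    def mount_retry_delays(initial=2.0, factor=2.0, cap=300.0, attempts=6):
        # Yields the exponential backoff seen in the durationBeforeRetry
        # fields of the nestedpendingoperations records: 2s, 4s, 8s, 16s, ...
        delay = initial
        for _ in range(attempts):
            yield delay
            delay = min(delay * factor, cap)

    print([int(d) for d in mount_retry_delays()])  # [2, 4, 8, 16, 32, 64]

The retries stop as soon as the missing secret appears, which is what happens at 13:37:35 below when MountVolume.SetUp finally succeeds for these volumes.
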
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/f39d1519-87df-476d-b47a-8b2857c23843-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" (UID: "f39d1519-87df-476d-b47a-8b2857c23843") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 13:37:23 crc kubenswrapper[4857]: I1128 13:37:23.595458 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-webhook-certs\") pod \"openstack-operator-controller-manager-888bbc64f-m879m\" (UID: \"340e937e-6fd3-4fd4-829e-2ac5972542b7\") " pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" Nov 28 13:37:23 crc kubenswrapper[4857]: I1128 13:37:23.595623 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-metrics-certs\") pod \"openstack-operator-controller-manager-888bbc64f-m879m\" (UID: \"340e937e-6fd3-4fd4-829e-2ac5972542b7\") " pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" Nov 28 13:37:23 crc kubenswrapper[4857]: E1128 13:37:23.595686 4857 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 13:37:23 crc kubenswrapper[4857]: E1128 13:37:23.595769 4857 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 13:37:23 crc kubenswrapper[4857]: E1128 13:37:23.595794 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-webhook-certs podName:340e937e-6fd3-4fd4-829e-2ac5972542b7 nodeName:}" failed. No retries permitted until 2025-11-28 13:37:27.595770626 +0000 UTC m=+1139.623145803 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-webhook-certs") pod "openstack-operator-controller-manager-888bbc64f-m879m" (UID: "340e937e-6fd3-4fd4-829e-2ac5972542b7") : secret "webhook-server-cert" not found Nov 28 13:37:23 crc kubenswrapper[4857]: E1128 13:37:23.595835 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-metrics-certs podName:340e937e-6fd3-4fd4-829e-2ac5972542b7 nodeName:}" failed. No retries permitted until 2025-11-28 13:37:27.595818417 +0000 UTC m=+1139.623193574 (durationBeforeRetry 4s). 
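
Each of these mount failures traces back to a Secret that does not exist yet (openstack-baremetal-operator-webhook-server-cert, webhook-server-cert, metrics-server-cert, infra-operator-webhook-server-cert). Webhook and metrics certificates like these are typically issued asynchronously by whatever manages certificates in the cluster, and the kubelet simply retries the mount until they show up. A hypothetical readiness check with the kubernetes Python client, assuming the client is installed and a kubeconfig is reachable; the secret names are copied from the records above:

    import time
    from kubernetes import client, config
    from kubernetes.client.rest import ApiException

    def wait_for_secret(name, namespace="openstack-operators", timeout=120):
        config.load_kube_config()  # use load_incluster_config() inside a pod
        v1 = client.CoreV1Api()
        deadline = time.time() + timeout
        while time.time() < deadline:
            try:
                v1.read_namespaced_secret(name, namespace)
                return True        # the volume mount can now succeed
            except ApiException as exc:
                if exc.status != 404:
                    raise          # only "not found" is worth waiting out
            time.sleep(2)
        return False

    for s in ("webhook-server-cert", "metrics-server-cert"):
        print(s, wait_for_secret(s))
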
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-metrics-certs") pod "openstack-operator-controller-manager-888bbc64f-m879m" (UID: "340e937e-6fd3-4fd4-829e-2ac5972542b7") : secret "metrics-server-cert" not found Nov 28 13:37:26 crc kubenswrapper[4857]: I1128 13:37:26.841447 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d437c518-be55-44c0-b374-5c3d2d62b49a-cert\") pod \"infra-operator-controller-manager-57548d458d-qb9wz\" (UID: \"d437c518-be55-44c0-b374-5c3d2d62b49a\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-qb9wz" Nov 28 13:37:26 crc kubenswrapper[4857]: E1128 13:37:26.841643 4857 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 13:37:26 crc kubenswrapper[4857]: E1128 13:37:26.841926 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d437c518-be55-44c0-b374-5c3d2d62b49a-cert podName:d437c518-be55-44c0-b374-5c3d2d62b49a nodeName:}" failed. No retries permitted until 2025-11-28 13:37:34.841904804 +0000 UTC m=+1146.869279971 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d437c518-be55-44c0-b374-5c3d2d62b49a-cert") pod "infra-operator-controller-manager-57548d458d-qb9wz" (UID: "d437c518-be55-44c0-b374-5c3d2d62b49a") : secret "infra-operator-webhook-server-cert" not found Nov 28 13:37:27 crc kubenswrapper[4857]: I1128 13:37:27.447545 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f39d1519-87df-476d-b47a-8b2857c23843-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm\" (UID: \"f39d1519-87df-476d-b47a-8b2857c23843\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" Nov 28 13:37:27 crc kubenswrapper[4857]: E1128 13:37:27.447822 4857 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 13:37:27 crc kubenswrapper[4857]: E1128 13:37:27.447986 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f39d1519-87df-476d-b47a-8b2857c23843-cert podName:f39d1519-87df-476d-b47a-8b2857c23843 nodeName:}" failed. No retries permitted until 2025-11-28 13:37:35.447962868 +0000 UTC m=+1147.475338095 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/f39d1519-87df-476d-b47a-8b2857c23843-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" (UID: "f39d1519-87df-476d-b47a-8b2857c23843") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 13:37:27 crc kubenswrapper[4857]: I1128 13:37:27.650201 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-webhook-certs\") pod \"openstack-operator-controller-manager-888bbc64f-m879m\" (UID: \"340e937e-6fd3-4fd4-829e-2ac5972542b7\") " pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" Nov 28 13:37:27 crc kubenswrapper[4857]: I1128 13:37:27.650300 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-metrics-certs\") pod \"openstack-operator-controller-manager-888bbc64f-m879m\" (UID: \"340e937e-6fd3-4fd4-829e-2ac5972542b7\") " pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" Nov 28 13:37:27 crc kubenswrapper[4857]: E1128 13:37:27.650484 4857 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 13:37:27 crc kubenswrapper[4857]: E1128 13:37:27.650539 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-metrics-certs podName:340e937e-6fd3-4fd4-829e-2ac5972542b7 nodeName:}" failed. No retries permitted until 2025-11-28 13:37:35.650523138 +0000 UTC m=+1147.677898305 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-metrics-certs") pod "openstack-operator-controller-manager-888bbc64f-m879m" (UID: "340e937e-6fd3-4fd4-829e-2ac5972542b7") : secret "metrics-server-cert" not found Nov 28 13:37:27 crc kubenswrapper[4857]: E1128 13:37:27.650544 4857 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 13:37:27 crc kubenswrapper[4857]: E1128 13:37:27.650792 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-webhook-certs podName:340e937e-6fd3-4fd4-829e-2ac5972542b7 nodeName:}" failed. No retries permitted until 2025-11-28 13:37:35.650697663 +0000 UTC m=+1147.678072830 (durationBeforeRetry 8s). 
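
The backoff progression is easy to confirm directly from this artifact. A small parser over the raw log, assuming it is saved locally as kubelet.log; the pattern matches the nestedpendingoperations records verbatim, keyed by UniqueName so that the two volumes both named "cert" (baremetal-operator and infra-operator) stay separate:

    import re
    from collections import defaultdict

    # Matches e.g.: "... (durationBeforeRetry 4s). Error: MountVolume.SetUp
    # failed for volume \"cert\" (UniqueName: \"kubernetes.io/secret/...\")"
    pat = re.compile(
        r'\(durationBeforeRetry (?P<secs>\d+)s\)\. '
        r'Error: MountVolume\.SetUp failed for volume "[^"]+" '
        r'\(UniqueName: "(?P<uid>[^"]+)"\)'
    )

    delays = defaultdict(list)
    with open("kubelet.log") as log:
        for line in log:
            for m in pat.finditer(line):
                delays[m.group("uid")].append(int(m.group("secs")))

    for uid, seq in sorted(delays.items()):
        print(uid.rsplit("/", 1)[-1], seq)  # e.g. ...-webhook-certs [2, 4, 8]
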
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-webhook-certs") pod "openstack-operator-controller-manager-888bbc64f-m879m" (UID: "340e937e-6fd3-4fd4-829e-2ac5972542b7") : secret "webhook-server-cert" not found Nov 28 13:37:33 crc kubenswrapper[4857]: I1128 13:37:33.178553 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:37:33 crc kubenswrapper[4857]: I1128 13:37:33.179117 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:37:33 crc kubenswrapper[4857]: E1128 13:37:33.769647 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:d65dbfc956e9cf376f3c48fc3a0942cb7306b5164f898c40d1efca106df81db7" Nov 28 13:37:33 crc kubenswrapper[4857]: E1128 13:37:33.770117 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:d65dbfc956e9cf376f3c48fc3a0942cb7306b5164f898c40d1efca106df81db7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-f798c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-67cb4dc6d4-hg5f2_openstack-operators(cb4c2469-9178-463d-a9be-700af973c9b8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:37:34 crc kubenswrapper[4857]: E1128 13:37:34.230534 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:ddc8a82f05930db8ee7a8d6d189b5a66373060656e4baf71ac302f89c477da4c" Nov 28 13:37:34 crc kubenswrapper[4857]: E1128 13:37:34.230730 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:ddc8a82f05930db8ee7a8d6d189b5a66373060656e4baf71ac302f89c477da4c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wvfl2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-64cdc6ff96-zzqvn_openstack-operators(78748478-834d-4797-b214-b72206253e23): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:37:34 crc kubenswrapper[4857]: I1128 13:37:34.853422 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d437c518-be55-44c0-b374-5c3d2d62b49a-cert\") pod \"infra-operator-controller-manager-57548d458d-qb9wz\" (UID: \"d437c518-be55-44c0-b374-5c3d2d62b49a\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-qb9wz" Nov 28 13:37:34 crc kubenswrapper[4857]: E1128 13:37:34.853600 4857 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 13:37:34 crc kubenswrapper[4857]: E1128 13:37:34.853654 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d437c518-be55-44c0-b374-5c3d2d62b49a-cert podName:d437c518-be55-44c0-b374-5c3d2d62b49a nodeName:}" failed. No retries permitted until 2025-11-28 13:37:50.853635857 +0000 UTC m=+1162.881011024 (durationBeforeRetry 16s). 
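
The probe fields embedded in the container specs above (liveness: InitialDelaySeconds 15, PeriodSeconds 20, FailureThreshold 3; readiness: 5/10/3) bound how quickly an unresponsive container is caught, and the same mechanism is behind the machine-config-daemon liveness failure logged at 13:37:33. As a rough bound, ignoring TimeoutSeconds and probe scheduling jitter:

    def worst_case_detection(initial_delay, period, failure_threshold):
        # A container that never answers is declared failed only after the
        # initial delay plus `failure_threshold` consecutive failed periods.
        return initial_delay + failure_threshold * period

    print(worst_case_detection(15, 20, 3), "s")  # 75 s for the liveness spec above
    print(worst_case_detection(5, 10, 3), "s")   # 35 s for the readiness spec
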
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d437c518-be55-44c0-b374-5c3d2d62b49a-cert") pod "infra-operator-controller-manager-57548d458d-qb9wz" (UID: "d437c518-be55-44c0-b374-5c3d2d62b49a") : secret "infra-operator-webhook-server-cert" not found Nov 28 13:37:35 crc kubenswrapper[4857]: I1128 13:37:35.311708 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 13:37:35 crc kubenswrapper[4857]: I1128 13:37:35.532371 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f39d1519-87df-476d-b47a-8b2857c23843-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm\" (UID: \"f39d1519-87df-476d-b47a-8b2857c23843\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" Nov 28 13:37:35 crc kubenswrapper[4857]: I1128 13:37:35.539285 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f39d1519-87df-476d-b47a-8b2857c23843-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm\" (UID: \"f39d1519-87df-476d-b47a-8b2857c23843\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" Nov 28 13:37:35 crc kubenswrapper[4857]: I1128 13:37:35.591551 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-pfgw4" Nov 28 13:37:35 crc kubenswrapper[4857]: I1128 13:37:35.599802 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" Nov 28 13:37:35 crc kubenswrapper[4857]: I1128 13:37:35.735089 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-webhook-certs\") pod \"openstack-operator-controller-manager-888bbc64f-m879m\" (UID: \"340e937e-6fd3-4fd4-829e-2ac5972542b7\") " pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" Nov 28 13:37:35 crc kubenswrapper[4857]: I1128 13:37:35.735176 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-metrics-certs\") pod \"openstack-operator-controller-manager-888bbc64f-m879m\" (UID: \"340e937e-6fd3-4fd4-829e-2ac5972542b7\") " pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" Nov 28 13:37:35 crc kubenswrapper[4857]: I1128 13:37:35.739574 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-webhook-certs\") pod \"openstack-operator-controller-manager-888bbc64f-m879m\" (UID: \"340e937e-6fd3-4fd4-829e-2ac5972542b7\") " pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" Nov 28 13:37:35 crc kubenswrapper[4857]: I1128 13:37:35.739992 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/340e937e-6fd3-4fd4-829e-2ac5972542b7-metrics-certs\") pod \"openstack-operator-controller-manager-888bbc64f-m879m\" (UID: \"340e937e-6fd3-4fd4-829e-2ac5972542b7\") " pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" Nov 28 
13:37:35 crc kubenswrapper[4857]: I1128 13:37:35.889639 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-857tv" Nov 28 13:37:35 crc kubenswrapper[4857]: I1128 13:37:35.898456 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" Nov 28 13:37:44 crc kubenswrapper[4857]: E1128 13:37:44.148151 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/barbican-operator@sha256:3dbf9fd9dce75f1fb250ee4c4097ad77d2f34110b61d85e37abd9c472e022e6c" Nov 28 13:37:44 crc kubenswrapper[4857]: E1128 13:37:44.148967 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/barbican-operator@sha256:3dbf9fd9dce75f1fb250ee4c4097ad77d2f34110b61d85e37abd9c472e022e6c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-t68xd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-7b64f4fb85-msp7g_openstack-operators(8ba8130c-f7e1-4cc0-8427-5a13997138ce): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:37:44 crc kubenswrapper[4857]: E1128 13:37:44.446852 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" 
image="quay.io/openstack-k8s-operators/keystone-operator@sha256:25faa5b0e4801d4d3b01a28b877ed3188eee71f33ad66f3c2e86b7921758e711" Nov 28 13:37:44 crc kubenswrapper[4857]: E1128 13:37:44.447310 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:25faa5b0e4801d4d3b01a28b877ed3188eee71f33ad66f3c2e86b7921758e711,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bxqgd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-7b4567c7cf-r9w5w_openstack-operators(49f7d7ee-64a4-4ce9-91ec-a76be0cdd249): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:37:44 crc kubenswrapper[4857]: I1128 13:37:44.948799 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm"] Nov 28 13:37:44 crc kubenswrapper[4857]: I1128 13:37:44.953002 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m"] Nov 28 13:37:45 crc kubenswrapper[4857]: W1128 13:37:45.009407 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf39d1519_87df_476d_b47a_8b2857c23843.slice/crio-6d416001c05beb47427817a03bd904a97eb9112bbcc3bc08f66cb02aac39da4c WatchSource:0}: Error finding container 6d416001c05beb47427817a03bd904a97eb9112bbcc3bc08f66cb02aac39da4c: 
Status 404 returned error can't find the container with id 6d416001c05beb47427817a03bd904a97eb9112bbcc3bc08f66cb02aac39da4c Nov 28 13:37:45 crc kubenswrapper[4857]: I1128 13:37:45.462187 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-4wtvs" event={"ID":"502d3010-ae32-4e6f-a7bf-614cd1da9dda","Type":"ContainerStarted","Data":"6fba1a48ab0ad75a0fff8d7d011c7c8b8f4b53c9ffb57d17972cbee9444652d3"} Nov 28 13:37:45 crc kubenswrapper[4857]: I1128 13:37:45.472561 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-fp6fc" event={"ID":"41a3b8e7-e61b-45fc-a87e-99e2d943fd15","Type":"ContainerStarted","Data":"83f43a5bd905994bab7091c98c3a9e1b8e72de7d5d7edac2a603c80aa0dc067c"} Nov 28 13:37:45 crc kubenswrapper[4857]: I1128 13:37:45.481465 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-nv4bx" event={"ID":"4f45a249-50b4-466c-a54b-9205e5a127e7","Type":"ContainerStarted","Data":"41eece371652ac5114c85ca340178685a911abd1c6bff93440994ff2e3124f18"} Nov 28 13:37:45 crc kubenswrapper[4857]: I1128 13:37:45.498372 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-x4dfd" event={"ID":"968d6179-1a75-405c-97cc-cad775d59e28","Type":"ContainerStarted","Data":"fb371213b53e7db1bbaeb98ffe18bcadb295beb7ba0c9532a67606713dcfea25"} Nov 28 13:37:45 crc kubenswrapper[4857]: I1128 13:37:45.517489 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" event={"ID":"340e937e-6fd3-4fd4-829e-2ac5972542b7","Type":"ContainerStarted","Data":"169c0f8c275f7ff9b0058b15258940722ac8a61bb799fc54457fcf80a257ae5e"} Nov 28 13:37:45 crc kubenswrapper[4857]: I1128 13:37:45.517558 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" event={"ID":"340e937e-6fd3-4fd4-829e-2ac5972542b7","Type":"ContainerStarted","Data":"43e7a3e208a9b6070ec1c6477fdf7940772d3efbc038938324a18c9e83196335"} Nov 28 13:37:45 crc kubenswrapper[4857]: I1128 13:37:45.518881 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" Nov 28 13:37:45 crc kubenswrapper[4857]: I1128 13:37:45.525713 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-4gn9q" event={"ID":"3c9f0811-92a3-4681-b71e-28d474c3751e","Type":"ContainerStarted","Data":"4c8fcd7f22eb4303b9c36a5e8460843f9e66ee3e3f3aa96eefa3727d7a89caf3"} Nov 28 13:37:45 crc kubenswrapper[4857]: I1128 13:37:45.527498 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-wkfb4" event={"ID":"e4010942-0dec-4e3e-8f52-f69abf7ace10","Type":"ContainerStarted","Data":"c8b8818aaa3ddded7a0873d593f0ffe876a90b47fc641ffa06664af365b94fe5"} Nov 28 13:37:45 crc kubenswrapper[4857]: I1128 13:37:45.588838 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-fr6n9" event={"ID":"42e0b127-fc00-42c7-a7d8-9e5ea55a6590","Type":"ContainerStarted","Data":"bee4f45e7db13431273c774b3636052cf5acb02ece4cd3c1f7b26d2e158edbe3"} Nov 28 13:37:45 crc kubenswrapper[4857]: I1128 13:37:45.590955 4857 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-7lsxw" event={"ID":"faeedd17-af99-4393-bf5c-ac5cc3b2d7b5","Type":"ContainerStarted","Data":"ba14e81a6616c37b56b6824347385d9bb0f0affff870525a0c8de30c6465bfcc"} Nov 28 13:37:45 crc kubenswrapper[4857]: I1128 13:37:45.610473 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" event={"ID":"f39d1519-87df-476d-b47a-8b2857c23843","Type":"ContainerStarted","Data":"6d416001c05beb47427817a03bd904a97eb9112bbcc3bc08f66cb02aac39da4c"} Nov 28 13:37:45 crc kubenswrapper[4857]: I1128 13:37:45.645210 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-cwbkd" event={"ID":"b35bb4aa-b164-47fe-85bd-8f34b7e55e5e","Type":"ContainerStarted","Data":"2a6b24044cb8f4370b26046fdd67865cfa73db84f60c85e5b9ed56b0758ab1b3"} Nov 28 13:37:45 crc kubenswrapper[4857]: I1128 13:37:45.689088 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-c9dqn" event={"ID":"a4e0aa4d-510c-4880-84fd-998e7527e41d","Type":"ContainerStarted","Data":"957ec1c3b3c0428241fa80d53c4843c898d97734d6894b280a4e636c2fe75324"} Nov 28 13:37:48 crc kubenswrapper[4857]: I1128 13:37:48.357155 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" podStartSLOduration=29.357135142 podStartE2EDuration="29.357135142s" podCreationTimestamp="2025-11-28 13:37:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:37:45.637287943 +0000 UTC m=+1157.664663110" watchObservedRunningTime="2025-11-28 13:37:48.357135142 +0000 UTC m=+1160.384510309" Nov 28 13:37:50 crc kubenswrapper[4857]: I1128 13:37:50.945949 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d437c518-be55-44c0-b374-5c3d2d62b49a-cert\") pod \"infra-operator-controller-manager-57548d458d-qb9wz\" (UID: \"d437c518-be55-44c0-b374-5c3d2d62b49a\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-qb9wz" Nov 28 13:37:50 crc kubenswrapper[4857]: I1128 13:37:50.952235 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d437c518-be55-44c0-b374-5c3d2d62b49a-cert\") pod \"infra-operator-controller-manager-57548d458d-qb9wz\" (UID: \"d437c518-be55-44c0-b374-5c3d2d62b49a\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-qb9wz" Nov 28 13:37:51 crc kubenswrapper[4857]: I1128 13:37:51.252670 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-khqvk" Nov 28 13:37:51 crc kubenswrapper[4857]: I1128 13:37:51.262002 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-qb9wz" Nov 28 13:37:54 crc kubenswrapper[4857]: I1128 13:37:54.980142 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-qb9wz"] Nov 28 13:37:55 crc kubenswrapper[4857]: W1128 13:37:55.054096 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd437c518_be55_44c0_b374_5c3d2d62b49a.slice/crio-aa451ff9d5101538c300dc26b05d21a17836b695e1670019c1ca576badc75d3f WatchSource:0}: Error finding container aa451ff9d5101538c300dc26b05d21a17836b695e1670019c1ca576badc75d3f: Status 404 returned error can't find the container with id aa451ff9d5101538c300dc26b05d21a17836b695e1670019c1ca576badc75d3f Nov 28 13:37:55 crc kubenswrapper[4857]: I1128 13:37:55.828998 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-qb9wz" event={"ID":"d437c518-be55-44c0-b374-5c3d2d62b49a","Type":"ContainerStarted","Data":"aa451ff9d5101538c300dc26b05d21a17836b695e1670019c1ca576badc75d3f"} Nov 28 13:37:55 crc kubenswrapper[4857]: I1128 13:37:55.830782 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-dlmmw" event={"ID":"af0c9c82-73f4-4bff-b0f6-a94c0d6e731a","Type":"ContainerStarted","Data":"95782e765d33c7f3f1471bf437aad4bb79da65bd6d2f16af538f01e9494df0d7"} Nov 28 13:37:55 crc kubenswrapper[4857]: I1128 13:37:55.832855 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-nwrtk" event={"ID":"de166f90-9b9c-49b3-b12b-0e36ae5db4da","Type":"ContainerStarted","Data":"83645997fd6bf29bd2573822eb9a9067f686dbaf417ca3d3d95065f013eff4cf"} Nov 28 13:37:55 crc kubenswrapper[4857]: I1128 13:37:55.834144 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-ggrfl" event={"ID":"5c39c627-3379-4971-8d55-48bede6d34ec","Type":"ContainerStarted","Data":"686224f97a12026c8f820ff1da297fc244ec1f09f62d8a239a03003106d9a22c"} Nov 28 13:37:55 crc kubenswrapper[4857]: I1128 13:37:55.904392 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-888bbc64f-m879m" Nov 28 13:37:56 crc kubenswrapper[4857]: E1128 13:37:56.545704 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-msp7g" podUID="8ba8130c-f7e1-4cc0-8427-5a13997138ce" Nov 28 13:37:56 crc kubenswrapper[4857]: E1128 13:37:56.597307 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-r9w5w" podUID="49f7d7ee-64a4-4ce9-91ec-a76be0cdd249" Nov 28 13:37:56 crc kubenswrapper[4857]: I1128 13:37:56.850355 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" 
event={"ID":"f39d1519-87df-476d-b47a-8b2857c23843","Type":"ContainerStarted","Data":"e2b5eadc337dec7981b7db3436ef0343414645924eb46cd722200e5e9c33e315"} Nov 28 13:37:56 crc kubenswrapper[4857]: I1128 13:37:56.852839 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-ngcdx" event={"ID":"90c064f2-ec25-44c8-ab5a-17fdb307cfe6","Type":"ContainerStarted","Data":"6f576ad25e84b35bab0186d048f4627df2a669bdaaf541e3be4a2f02afc9feae"} Nov 28 13:37:56 crc kubenswrapper[4857]: I1128 13:37:56.863626 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-msp7g" event={"ID":"8ba8130c-f7e1-4cc0-8427-5a13997138ce","Type":"ContainerStarted","Data":"239136b6a59458095622168bd226cfebf3ef7fca34e5fa63e914cd534bebde5b"} Nov 28 13:37:56 crc kubenswrapper[4857]: I1128 13:37:56.902882 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-nwrtk" event={"ID":"de166f90-9b9c-49b3-b12b-0e36ae5db4da","Type":"ContainerStarted","Data":"2e74fd7f3a32ac706a46fbd9cdf863c6cb607a7791fbfbe00025d42e91a7e5ea"} Nov 28 13:37:56 crc kubenswrapper[4857]: I1128 13:37:56.903511 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-nwrtk" Nov 28 13:37:56 crc kubenswrapper[4857]: I1128 13:37:56.910030 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-r9w5w" event={"ID":"49f7d7ee-64a4-4ce9-91ec-a76be0cdd249","Type":"ContainerStarted","Data":"c3b80c12107fc23d94aa4672d6291891c95f022287cd7417504d863591c6b981"} Nov 28 13:37:56 crc kubenswrapper[4857]: I1128 13:37:56.917760 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b9nj2" event={"ID":"5d82c76b-a0e1-4001-8676-390818e9edaf","Type":"ContainerStarted","Data":"95caa8762476777f7ea0911a5e0ca7f96c4f55385a5cb6c0b265ef2855b77337"} Nov 28 13:37:56 crc kubenswrapper[4857]: I1128 13:37:56.928396 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-64gmq" event={"ID":"729a83b9-4e01-4943-abed-58960ed40e68","Type":"ContainerStarted","Data":"f4334b5a541acf485f869ec050483f7cfd4e890d032d4fb597ba44dfda7edb29"} Nov 28 13:37:56 crc kubenswrapper[4857]: I1128 13:37:56.931098 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-nwrtk" podStartSLOduration=2.647942289 podStartE2EDuration="37.931081912s" podCreationTimestamp="2025-11-28 13:37:19 +0000 UTC" firstStartedPulling="2025-11-28 13:37:21.092016863 +0000 UTC m=+1133.119392030" lastFinishedPulling="2025-11-28 13:37:56.375156486 +0000 UTC m=+1168.402531653" observedRunningTime="2025-11-28 13:37:56.922654255 +0000 UTC m=+1168.950029422" watchObservedRunningTime="2025-11-28 13:37:56.931081912 +0000 UTC m=+1168.958457079" Nov 28 13:37:56 crc kubenswrapper[4857]: I1128 13:37:56.941453 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-b9nj2" podStartSLOduration=4.164635324 podStartE2EDuration="37.941437605s" podCreationTimestamp="2025-11-28 13:37:19 +0000 UTC" firstStartedPulling="2025-11-28 13:37:21.093080634 +0000 UTC m=+1133.120455801" 
lastFinishedPulling="2025-11-28 13:37:54.869882915 +0000 UTC m=+1166.897258082" observedRunningTime="2025-11-28 13:37:56.938713496 +0000 UTC m=+1168.966088663" watchObservedRunningTime="2025-11-28 13:37:56.941437605 +0000 UTC m=+1168.968812772" Nov 28 13:37:57 crc kubenswrapper[4857]: E1128 13:37:57.432452 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-zzqvn" podUID="78748478-834d-4797-b214-b72206253e23" Nov 28 13:37:57 crc kubenswrapper[4857]: I1128 13:37:57.952386 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" event={"ID":"f39d1519-87df-476d-b47a-8b2857c23843","Type":"ContainerStarted","Data":"c396f6fbb8bc28a64faab54d5c227d5ed0126b5142482a218e48772ea710c827"} Nov 28 13:37:57 crc kubenswrapper[4857]: I1128 13:37:57.952635 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" Nov 28 13:37:57 crc kubenswrapper[4857]: I1128 13:37:57.966328 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-ggrfl" event={"ID":"5c39c627-3379-4971-8d55-48bede6d34ec","Type":"ContainerStarted","Data":"bbef35356c9fd464d29ff9157bb53a92e7c6d98ca1bb9ebe50c2003137d68024"} Nov 28 13:37:57 crc kubenswrapper[4857]: I1128 13:37:57.966411 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-ggrfl" Nov 28 13:37:57 crc kubenswrapper[4857]: I1128 13:37:57.980826 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-4wtvs" event={"ID":"502d3010-ae32-4e6f-a7bf-614cd1da9dda","Type":"ContainerStarted","Data":"a3a4e620fd8e10d46f800f3ed8ca16c295fbc972d7bc7074e3383492d26557a5"} Nov 28 13:37:57 crc kubenswrapper[4857]: I1128 13:37:57.981245 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-4wtvs" Nov 28 13:37:57 crc kubenswrapper[4857]: I1128 13:37:57.985332 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-4wtvs" Nov 28 13:37:57 crc kubenswrapper[4857]: I1128 13:37:57.990389 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-ngcdx" event={"ID":"90c064f2-ec25-44c8-ab5a-17fdb307cfe6","Type":"ContainerStarted","Data":"db5a2e5131a064986cecf66334aaf2cd1d3ecb41bd27ef7287508cc0271b05ef"} Nov 28 13:37:57 crc kubenswrapper[4857]: I1128 13:37:57.990566 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-ngcdx" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.007741 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" podStartSLOduration=30.085588823 podStartE2EDuration="40.007722963s" podCreationTimestamp="2025-11-28 13:37:18 +0000 UTC" firstStartedPulling="2025-11-28 13:37:45.037193854 +0000 UTC m=+1157.064569021" 
lastFinishedPulling="2025-11-28 13:37:54.959327994 +0000 UTC m=+1166.986703161" observedRunningTime="2025-11-28 13:37:58.002443058 +0000 UTC m=+1170.029818215" watchObservedRunningTime="2025-11-28 13:37:58.007722963 +0000 UTC m=+1170.035098130" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.012479 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-dlmmw" event={"ID":"af0c9c82-73f4-4bff-b0f6-a94c0d6e731a","Type":"ContainerStarted","Data":"6d1e9aa920a26fb92f32563d8e1e87923bd8ee4d7c30098369acc52c47f4d06e"} Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.013138 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-dlmmw" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.034311 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-nv4bx" event={"ID":"4f45a249-50b4-466c-a54b-9205e5a127e7","Type":"ContainerStarted","Data":"e6005a0a7245556098223733817916293c359415deef75ec0855455046bb78b9"} Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.034469 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-nv4bx" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.037067 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-nv4bx" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.042273 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-x4dfd" event={"ID":"968d6179-1a75-405c-97cc-cad775d59e28","Type":"ContainerStarted","Data":"7afd542308299b251176bbf207ef5872f8d1581a31bc62d221e3c44beebc7f22"} Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.042475 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-x4dfd" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.044388 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-64gmq" event={"ID":"729a83b9-4e01-4943-abed-58960ed40e68","Type":"ContainerStarted","Data":"e62c94fd73cfb5e49330e05ad4e19dea839ebe480aebd2fb585e7b390ecbf09b"} Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.044512 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-64gmq" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.045331 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-x4dfd" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.046022 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-cwbkd" event={"ID":"b35bb4aa-b164-47fe-85bd-8f34b7e55e5e","Type":"ContainerStarted","Data":"183625bd660e04eec54a617474db9c942f25e9569f7b29882f4d6171c023708f"} Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.046169 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-cwbkd" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.048228 4857 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-cwbkd" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.050572 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-4wtvs" podStartSLOduration=4.219203013 podStartE2EDuration="40.050560357s" podCreationTimestamp="2025-11-28 13:37:18 +0000 UTC" firstStartedPulling="2025-11-28 13:37:20.662524688 +0000 UTC m=+1132.689899855" lastFinishedPulling="2025-11-28 13:37:56.493882032 +0000 UTC m=+1168.521257199" observedRunningTime="2025-11-28 13:37:58.048844747 +0000 UTC m=+1170.076219914" watchObservedRunningTime="2025-11-28 13:37:58.050560357 +0000 UTC m=+1170.077935524" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.068139 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-c9dqn" event={"ID":"a4e0aa4d-510c-4880-84fd-998e7527e41d","Type":"ContainerStarted","Data":"8f909e93dd0f695f3c047d941948db5faac0f333d8d0980ffeddb3286be16566"} Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.068851 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-c9dqn" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.076016 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-c9dqn" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.128223 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-7lsxw" event={"ID":"faeedd17-af99-4393-bf5c-ac5cc3b2d7b5","Type":"ContainerStarted","Data":"97baadaff39dd26f21ebcc526d13b4fed5ddc56623488ecf669ef305f3f38e77"} Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.128627 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-7lsxw" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.141625 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-7lsxw" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.151725 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-zzqvn" event={"ID":"78748478-834d-4797-b214-b72206253e23","Type":"ContainerStarted","Data":"9ee6bed963135f54bc052a99b02c79e50584d233b234eb79239d104d48ea5af0"} Nov 28 13:37:58 crc kubenswrapper[4857]: E1128 13:37:58.156722 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-hg5f2" podUID="cb4c2469-9178-463d-a9be-700af973c9b8" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.163384 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-4gn9q" event={"ID":"3c9f0811-92a3-4681-b71e-28d474c3751e","Type":"ContainerStarted","Data":"a326a7147a8ac944e810325bfba26c822826a69e3d02b206f2f182022657df23"} Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.164340 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-4gn9q" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.171559 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-ngcdx" podStartSLOduration=6.106742852 podStartE2EDuration="39.171543239s" podCreationTimestamp="2025-11-28 13:37:19 +0000 UTC" firstStartedPulling="2025-11-28 13:37:21.021439256 +0000 UTC m=+1133.048814423" lastFinishedPulling="2025-11-28 13:37:54.086239653 +0000 UTC m=+1166.113614810" observedRunningTime="2025-11-28 13:37:58.112674275 +0000 UTC m=+1170.140049442" watchObservedRunningTime="2025-11-28 13:37:58.171543239 +0000 UTC m=+1170.198918396" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.172169 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-ggrfl" podStartSLOduration=3.792625422 podStartE2EDuration="39.172163627s" podCreationTimestamp="2025-11-28 13:37:19 +0000 UTC" firstStartedPulling="2025-11-28 13:37:21.111996058 +0000 UTC m=+1133.139371235" lastFinishedPulling="2025-11-28 13:37:56.491534273 +0000 UTC m=+1168.518909440" observedRunningTime="2025-11-28 13:37:58.160531757 +0000 UTC m=+1170.187906934" watchObservedRunningTime="2025-11-28 13:37:58.172163627 +0000 UTC m=+1170.199538794" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.178938 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-fr6n9" event={"ID":"42e0b127-fc00-42c7-a7d8-9e5ea55a6590","Type":"ContainerStarted","Data":"7f0f9ffe2b7f74b4b7c7da051ef25d41b5e96ba7fff3e018955a67037b1ac5c8"} Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.180764 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-fr6n9" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.181697 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-4gn9q" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.184304 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-955677c94-fr6n9" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.191271 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-wkfb4" event={"ID":"e4010942-0dec-4e3e-8f52-f69abf7ace10","Type":"ContainerStarted","Data":"3f711e2ab4d855c8dd1ee0bed6c65947fdf58a052d8b68c1f43ff4cf32afda7e"} Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.191316 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-wkfb4" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.208000 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-wkfb4" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.234022 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-c9dqn" podStartSLOduration=4.109349817 podStartE2EDuration="40.234006488s" podCreationTimestamp="2025-11-28 13:37:18 +0000 UTC" firstStartedPulling="2025-11-28 13:37:20.823004237 +0000 
UTC m=+1132.850379394" lastFinishedPulling="2025-11-28 13:37:56.947660898 +0000 UTC m=+1168.975036065" observedRunningTime="2025-11-28 13:37:58.232638788 +0000 UTC m=+1170.260013955" watchObservedRunningTime="2025-11-28 13:37:58.234006488 +0000 UTC m=+1170.261381655" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.286402 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-dlmmw" podStartSLOduration=4.006847343 podStartE2EDuration="39.286364931s" podCreationTimestamp="2025-11-28 13:37:19 +0000 UTC" firstStartedPulling="2025-11-28 13:37:21.09296155 +0000 UTC m=+1133.120336717" lastFinishedPulling="2025-11-28 13:37:56.372479138 +0000 UTC m=+1168.399854305" observedRunningTime="2025-11-28 13:37:58.277860852 +0000 UTC m=+1170.305236019" watchObservedRunningTime="2025-11-28 13:37:58.286364931 +0000 UTC m=+1170.313740098" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.369903 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-cwbkd" podStartSLOduration=4.6180037 podStartE2EDuration="40.369876236s" podCreationTimestamp="2025-11-28 13:37:18 +0000 UTC" firstStartedPulling="2025-11-28 13:37:20.672036387 +0000 UTC m=+1132.699411554" lastFinishedPulling="2025-11-28 13:37:56.423908923 +0000 UTC m=+1168.451284090" observedRunningTime="2025-11-28 13:37:58.325643551 +0000 UTC m=+1170.353018718" watchObservedRunningTime="2025-11-28 13:37:58.369876236 +0000 UTC m=+1170.397251413" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.371776 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-x4dfd" podStartSLOduration=4.385563204 podStartE2EDuration="40.371768651s" podCreationTimestamp="2025-11-28 13:37:18 +0000 UTC" firstStartedPulling="2025-11-28 13:37:20.553830666 +0000 UTC m=+1132.581205833" lastFinishedPulling="2025-11-28 13:37:56.540036113 +0000 UTC m=+1168.567411280" observedRunningTime="2025-11-28 13:37:58.355097203 +0000 UTC m=+1170.382472370" watchObservedRunningTime="2025-11-28 13:37:58.371768651 +0000 UTC m=+1170.399143828" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.411792 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-nv4bx" podStartSLOduration=4.269823464 podStartE2EDuration="40.411740001s" podCreationTimestamp="2025-11-28 13:37:18 +0000 UTC" firstStartedPulling="2025-11-28 13:37:20.821252555 +0000 UTC m=+1132.848627722" lastFinishedPulling="2025-11-28 13:37:56.963169082 +0000 UTC m=+1168.990544259" observedRunningTime="2025-11-28 13:37:58.396924768 +0000 UTC m=+1170.424299935" watchObservedRunningTime="2025-11-28 13:37:58.411740001 +0000 UTC m=+1170.439115168" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.451405 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-7lsxw" podStartSLOduration=4.79360092 podStartE2EDuration="40.451379032s" podCreationTimestamp="2025-11-28 13:37:18 +0000 UTC" firstStartedPulling="2025-11-28 13:37:20.789505886 +0000 UTC m=+1132.816881053" lastFinishedPulling="2025-11-28 13:37:56.447283988 +0000 UTC m=+1168.474659165" observedRunningTime="2025-11-28 13:37:58.432046046 +0000 UTC m=+1170.459421223" watchObservedRunningTime="2025-11-28 13:37:58.451379032 +0000 UTC 
m=+1170.478754199" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.505524 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-d77b94747-64gmq" podStartSLOduration=6.552832062 podStartE2EDuration="39.505503346s" podCreationTimestamp="2025-11-28 13:37:19 +0000 UTC" firstStartedPulling="2025-11-28 13:37:21.024489776 +0000 UTC m=+1133.051864943" lastFinishedPulling="2025-11-28 13:37:53.97716106 +0000 UTC m=+1166.004536227" observedRunningTime="2025-11-28 13:37:58.497308927 +0000 UTC m=+1170.524684094" watchObservedRunningTime="2025-11-28 13:37:58.505503346 +0000 UTC m=+1170.532878523" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.534243 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-wkfb4" podStartSLOduration=3.541021557 podStartE2EDuration="40.534219347s" podCreationTimestamp="2025-11-28 13:37:18 +0000 UTC" firstStartedPulling="2025-11-28 13:37:20.237313669 +0000 UTC m=+1132.264688836" lastFinishedPulling="2025-11-28 13:37:57.230511459 +0000 UTC m=+1169.257886626" observedRunningTime="2025-11-28 13:37:58.521126564 +0000 UTC m=+1170.548501761" watchObservedRunningTime="2025-11-28 13:37:58.534219347 +0000 UTC m=+1170.561594514" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.544364 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-4gn9q" podStartSLOduration=4.319937953 podStartE2EDuration="40.544344534s" podCreationTimestamp="2025-11-28 13:37:18 +0000 UTC" firstStartedPulling="2025-11-28 13:37:20.589170721 +0000 UTC m=+1132.616545888" lastFinishedPulling="2025-11-28 13:37:56.813577302 +0000 UTC m=+1168.840952469" observedRunningTime="2025-11-28 13:37:58.538656367 +0000 UTC m=+1170.566031534" watchObservedRunningTime="2025-11-28 13:37:58.544344534 +0000 UTC m=+1170.571719701" Nov 28 13:37:58 crc kubenswrapper[4857]: I1128 13:37:58.569718 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-955677c94-fr6n9" podStartSLOduration=4.637952623 podStartE2EDuration="40.569674145s" podCreationTimestamp="2025-11-28 13:37:18 +0000 UTC" firstStartedPulling="2025-11-28 13:37:20.782231173 +0000 UTC m=+1132.809606340" lastFinishedPulling="2025-11-28 13:37:56.713952695 +0000 UTC m=+1168.741327862" observedRunningTime="2025-11-28 13:37:58.561330621 +0000 UTC m=+1170.588705788" watchObservedRunningTime="2025-11-28 13:37:58.569674145 +0000 UTC m=+1170.597049312" Nov 28 13:37:59 crc kubenswrapper[4857]: I1128 13:37:59.201109 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-zzqvn" event={"ID":"78748478-834d-4797-b214-b72206253e23","Type":"ContainerStarted","Data":"fe08c53d26f09cf921dd45466dd3ff5f5cdf49b521d138886800cc6474aeb05e"} Nov 28 13:37:59 crc kubenswrapper[4857]: I1128 13:37:59.201379 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-zzqvn" Nov 28 13:37:59 crc kubenswrapper[4857]: I1128 13:37:59.202452 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-msp7g" 
event={"ID":"8ba8130c-f7e1-4cc0-8427-5a13997138ce","Type":"ContainerStarted","Data":"bc2b85b91a7d4d348f290b93c7d80a5aebce51b01a6e2809102095e628134aa7"} Nov 28 13:37:59 crc kubenswrapper[4857]: I1128 13:37:59.202856 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-msp7g" Nov 28 13:37:59 crc kubenswrapper[4857]: I1128 13:37:59.204225 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-r9w5w" event={"ID":"49f7d7ee-64a4-4ce9-91ec-a76be0cdd249","Type":"ContainerStarted","Data":"37c5e6b077ebe012ad3dda498217416eb077b6fac309b869de28e9d0e5ccf3ab"} Nov 28 13:37:59 crc kubenswrapper[4857]: I1128 13:37:59.204569 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-r9w5w" Nov 28 13:37:59 crc kubenswrapper[4857]: I1128 13:37:59.205920 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-hg5f2" event={"ID":"cb4c2469-9178-463d-a9be-700af973c9b8","Type":"ContainerStarted","Data":"f5935d62312a35e51b293159486facd7100312c9fd374e357b176553070a4739"} Nov 28 13:37:59 crc kubenswrapper[4857]: I1128 13:37:59.211081 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-fp6fc" event={"ID":"41a3b8e7-e61b-45fc-a87e-99e2d943fd15","Type":"ContainerStarted","Data":"3bdb8ea39f761d63e785085e44a5a26e6dece96ad01baa5c5adee8c5aace2f2a"} Nov 28 13:37:59 crc kubenswrapper[4857]: I1128 13:37:59.213128 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-fp6fc" Nov 28 13:37:59 crc kubenswrapper[4857]: I1128 13:37:59.216284 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-fp6fc" Nov 28 13:37:59 crc kubenswrapper[4857]: I1128 13:37:59.240981 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-r9w5w" podStartSLOduration=4.247662836 podStartE2EDuration="41.240947739s" podCreationTimestamp="2025-11-28 13:37:18 +0000 UTC" firstStartedPulling="2025-11-28 13:37:20.675612001 +0000 UTC m=+1132.702987178" lastFinishedPulling="2025-11-28 13:37:57.668896914 +0000 UTC m=+1169.696272081" observedRunningTime="2025-11-28 13:37:59.238777445 +0000 UTC m=+1171.266152602" watchObservedRunningTime="2025-11-28 13:37:59.240947739 +0000 UTC m=+1171.268322956" Nov 28 13:37:59 crc kubenswrapper[4857]: I1128 13:37:59.244096 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-zzqvn" podStartSLOduration=3.369451905 podStartE2EDuration="41.24407687s" podCreationTimestamp="2025-11-28 13:37:18 +0000 UTC" firstStartedPulling="2025-11-28 13:37:21.01781258 +0000 UTC m=+1133.045187747" lastFinishedPulling="2025-11-28 13:37:58.892437535 +0000 UTC m=+1170.919812712" observedRunningTime="2025-11-28 13:37:59.220219982 +0000 UTC m=+1171.247595199" watchObservedRunningTime="2025-11-28 13:37:59.24407687 +0000 UTC m=+1171.271452067" Nov 28 13:37:59 crc kubenswrapper[4857]: I1128 13:37:59.261490 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-msp7g" podStartSLOduration=4.116807786 podStartE2EDuration="41.261466829s" podCreationTimestamp="2025-11-28 13:37:18 +0000 UTC" firstStartedPulling="2025-11-28 13:37:20.708509175 +0000 UTC m=+1132.735884342" lastFinishedPulling="2025-11-28 13:37:57.853168218 +0000 UTC m=+1169.880543385" observedRunningTime="2025-11-28 13:37:59.254467334 +0000 UTC m=+1171.281842511" watchObservedRunningTime="2025-11-28 13:37:59.261466829 +0000 UTC m=+1171.288842006" Nov 28 13:37:59 crc kubenswrapper[4857]: I1128 13:37:59.281554 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-fp6fc" podStartSLOduration=3.568463841 podStartE2EDuration="41.281534517s" podCreationTimestamp="2025-11-28 13:37:18 +0000 UTC" firstStartedPulling="2025-11-28 13:37:19.954784237 +0000 UTC m=+1131.982159404" lastFinishedPulling="2025-11-28 13:37:57.667854913 +0000 UTC m=+1169.695230080" observedRunningTime="2025-11-28 13:37:59.28027776 +0000 UTC m=+1171.307652957" watchObservedRunningTime="2025-11-28 13:37:59.281534517 +0000 UTC m=+1171.308909684" Nov 28 13:38:00 crc kubenswrapper[4857]: I1128 13:38:00.217841 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-hg5f2" event={"ID":"cb4c2469-9178-463d-a9be-700af973c9b8","Type":"ContainerStarted","Data":"4390e90b66c648e85a73b7c5005a5c3b65174f4e61dae351be4c9f1357904c37"} Nov 28 13:38:00 crc kubenswrapper[4857]: I1128 13:38:00.218307 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-hg5f2" Nov 28 13:38:00 crc kubenswrapper[4857]: I1128 13:38:00.218668 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-dlmmw" Nov 28 13:38:00 crc kubenswrapper[4857]: I1128 13:38:00.219733 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-qb9wz" event={"ID":"d437c518-be55-44c0-b374-5c3d2d62b49a","Type":"ContainerStarted","Data":"868378e860ed09805d58a4e360bf2ff27994aad13a13eee91b473fbffc5ab553"} Nov 28 13:38:00 crc kubenswrapper[4857]: I1128 13:38:00.240874 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-hg5f2" podStartSLOduration=2.862672398 podStartE2EDuration="42.240851483s" podCreationTimestamp="2025-11-28 13:37:18 +0000 UTC" firstStartedPulling="2025-11-28 13:37:20.561989905 +0000 UTC m=+1132.589365062" lastFinishedPulling="2025-11-28 13:37:59.94016898 +0000 UTC m=+1171.967544147" observedRunningTime="2025-11-28 13:38:00.23493843 +0000 UTC m=+1172.262313627" watchObservedRunningTime="2025-11-28 13:38:00.240851483 +0000 UTC m=+1172.268226650" Nov 28 13:38:00 crc kubenswrapper[4857]: I1128 13:38:00.248264 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-nwrtk" Nov 28 13:38:00 crc kubenswrapper[4857]: I1128 13:38:00.285023 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-ggrfl" Nov 28 13:38:01 crc kubenswrapper[4857]: I1128 13:38:01.233113 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/infra-operator-controller-manager-57548d458d-qb9wz" event={"ID":"d437c518-be55-44c0-b374-5c3d2d62b49a","Type":"ContainerStarted","Data":"445ed0134ddc070d09d409e5313423ee29c1b411e00e36cdb3aea77ac48b5f60"} Nov 28 13:38:01 crc kubenswrapper[4857]: I1128 13:38:01.233931 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-qb9wz" Nov 28 13:38:01 crc kubenswrapper[4857]: I1128 13:38:01.253293 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-57548d458d-qb9wz" podStartSLOduration=38.36775865 podStartE2EDuration="43.253273205s" podCreationTimestamp="2025-11-28 13:37:18 +0000 UTC" firstStartedPulling="2025-11-28 13:37:55.060401443 +0000 UTC m=+1167.087776610" lastFinishedPulling="2025-11-28 13:37:59.945916008 +0000 UTC m=+1171.973291165" observedRunningTime="2025-11-28 13:38:01.249929297 +0000 UTC m=+1173.277304464" watchObservedRunningTime="2025-11-28 13:38:01.253273205 +0000 UTC m=+1173.280648372" Nov 28 13:38:03 crc kubenswrapper[4857]: I1128 13:38:03.178607 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:38:03 crc kubenswrapper[4857]: I1128 13:38:03.179119 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:38:03 crc kubenswrapper[4857]: I1128 13:38:03.179211 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" Nov 28 13:38:03 crc kubenswrapper[4857]: I1128 13:38:03.180680 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c7acb098908896eeec6673568d27f9b2d0362ab62a9a136da040ab452639a28c"} pod="openshift-machine-config-operator/machine-config-daemon-jdgls" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 13:38:03 crc kubenswrapper[4857]: I1128 13:38:03.180828 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" containerID="cri-o://c7acb098908896eeec6673568d27f9b2d0362ab62a9a136da040ab452639a28c" gracePeriod=600 Nov 28 13:38:04 crc kubenswrapper[4857]: I1128 13:38:04.261197 4857 generic.go:334] "Generic (PLEG): container finished" podID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerID="c7acb098908896eeec6673568d27f9b2d0362ab62a9a136da040ab452639a28c" exitCode=0 Nov 28 13:38:04 crc kubenswrapper[4857]: I1128 13:38:04.261245 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" event={"ID":"aba2e99a-c0de-4ae5-b347-de1565fd9d68","Type":"ContainerDied","Data":"c7acb098908896eeec6673568d27f9b2d0362ab62a9a136da040ab452639a28c"} Nov 28 13:38:04 crc kubenswrapper[4857]: I1128 13:38:04.261803 4857 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" event={"ID":"aba2e99a-c0de-4ae5-b347-de1565fd9d68","Type":"ContainerStarted","Data":"e3860c9cd9dfa55680c98f69cece6eff0f08ced38d345f3573b02bd062397f7a"} Nov 28 13:38:04 crc kubenswrapper[4857]: I1128 13:38:04.261831 4857 scope.go:117] "RemoveContainer" containerID="2e8dd17747c47de8cbeb5abc3c3cfa11211aa6c3f675d9205fe31b2543798131" Nov 28 13:38:05 crc kubenswrapper[4857]: I1128 13:38:05.608529 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm" Nov 28 13:38:10 crc kubenswrapper[4857]: I1128 13:38:09.229026 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-hg5f2" Nov 28 13:38:10 crc kubenswrapper[4857]: I1128 13:38:09.272468 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-msp7g" Nov 28 13:38:10 crc kubenswrapper[4857]: I1128 13:38:09.551481 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-r9w5w" Nov 28 13:38:10 crc kubenswrapper[4857]: I1128 13:38:10.033660 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-zzqvn" Nov 28 13:38:10 crc kubenswrapper[4857]: I1128 13:38:10.043479 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-ngcdx" Nov 28 13:38:10 crc kubenswrapper[4857]: I1128 13:38:10.203372 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d77b94747-64gmq" Nov 28 13:38:11 crc kubenswrapper[4857]: I1128 13:38:11.272456 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-qb9wz" Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.672230 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-hr8rg"] Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.674435 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-hr8rg" Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.678186 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-nr76z" Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.678315 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.678435 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.679847 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.689272 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-hr8rg"] Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.714860 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d865a2dc-946b-4a9c-814f-3406432d5a4e-config\") pod \"dnsmasq-dns-675f4bcbfc-hr8rg\" (UID: \"d865a2dc-946b-4a9c-814f-3406432d5a4e\") " pod="openstack/dnsmasq-dns-675f4bcbfc-hr8rg" Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.714985 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52w2j\" (UniqueName: \"kubernetes.io/projected/d865a2dc-946b-4a9c-814f-3406432d5a4e-kube-api-access-52w2j\") pod \"dnsmasq-dns-675f4bcbfc-hr8rg\" (UID: \"d865a2dc-946b-4a9c-814f-3406432d5a4e\") " pod="openstack/dnsmasq-dns-675f4bcbfc-hr8rg" Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.743931 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-cn2c5"] Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.745350 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-cn2c5" Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.747700 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.753476 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-cn2c5"] Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.816299 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1de1ce93-7a6d-4d58-90cd-03d94d4a1a32-config\") pod \"dnsmasq-dns-78dd6ddcc-cn2c5\" (UID: \"1de1ce93-7a6d-4d58-90cd-03d94d4a1a32\") " pod="openstack/dnsmasq-dns-78dd6ddcc-cn2c5" Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.816371 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52w2j\" (UniqueName: \"kubernetes.io/projected/d865a2dc-946b-4a9c-814f-3406432d5a4e-kube-api-access-52w2j\") pod \"dnsmasq-dns-675f4bcbfc-hr8rg\" (UID: \"d865a2dc-946b-4a9c-814f-3406432d5a4e\") " pod="openstack/dnsmasq-dns-675f4bcbfc-hr8rg" Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.816406 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cds5\" (UniqueName: \"kubernetes.io/projected/1de1ce93-7a6d-4d58-90cd-03d94d4a1a32-kube-api-access-2cds5\") pod \"dnsmasq-dns-78dd6ddcc-cn2c5\" (UID: \"1de1ce93-7a6d-4d58-90cd-03d94d4a1a32\") " pod="openstack/dnsmasq-dns-78dd6ddcc-cn2c5" Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.816461 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d865a2dc-946b-4a9c-814f-3406432d5a4e-config\") pod \"dnsmasq-dns-675f4bcbfc-hr8rg\" (UID: \"d865a2dc-946b-4a9c-814f-3406432d5a4e\") " pod="openstack/dnsmasq-dns-675f4bcbfc-hr8rg" Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.816494 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1de1ce93-7a6d-4d58-90cd-03d94d4a1a32-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-cn2c5\" (UID: \"1de1ce93-7a6d-4d58-90cd-03d94d4a1a32\") " pod="openstack/dnsmasq-dns-78dd6ddcc-cn2c5" Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.817584 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d865a2dc-946b-4a9c-814f-3406432d5a4e-config\") pod \"dnsmasq-dns-675f4bcbfc-hr8rg\" (UID: \"d865a2dc-946b-4a9c-814f-3406432d5a4e\") " pod="openstack/dnsmasq-dns-675f4bcbfc-hr8rg" Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.837102 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52w2j\" (UniqueName: \"kubernetes.io/projected/d865a2dc-946b-4a9c-814f-3406432d5a4e-kube-api-access-52w2j\") pod \"dnsmasq-dns-675f4bcbfc-hr8rg\" (UID: \"d865a2dc-946b-4a9c-814f-3406432d5a4e\") " pod="openstack/dnsmasq-dns-675f4bcbfc-hr8rg" Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.917467 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1de1ce93-7a6d-4d58-90cd-03d94d4a1a32-config\") pod \"dnsmasq-dns-78dd6ddcc-cn2c5\" (UID: \"1de1ce93-7a6d-4d58-90cd-03d94d4a1a32\") " pod="openstack/dnsmasq-dns-78dd6ddcc-cn2c5" Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 
13:38:27.917781 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cds5\" (UniqueName: \"kubernetes.io/projected/1de1ce93-7a6d-4d58-90cd-03d94d4a1a32-kube-api-access-2cds5\") pod \"dnsmasq-dns-78dd6ddcc-cn2c5\" (UID: \"1de1ce93-7a6d-4d58-90cd-03d94d4a1a32\") " pod="openstack/dnsmasq-dns-78dd6ddcc-cn2c5" Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.917873 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1de1ce93-7a6d-4d58-90cd-03d94d4a1a32-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-cn2c5\" (UID: \"1de1ce93-7a6d-4d58-90cd-03d94d4a1a32\") " pod="openstack/dnsmasq-dns-78dd6ddcc-cn2c5" Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.918480 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1de1ce93-7a6d-4d58-90cd-03d94d4a1a32-config\") pod \"dnsmasq-dns-78dd6ddcc-cn2c5\" (UID: \"1de1ce93-7a6d-4d58-90cd-03d94d4a1a32\") " pod="openstack/dnsmasq-dns-78dd6ddcc-cn2c5" Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.918607 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1de1ce93-7a6d-4d58-90cd-03d94d4a1a32-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-cn2c5\" (UID: \"1de1ce93-7a6d-4d58-90cd-03d94d4a1a32\") " pod="openstack/dnsmasq-dns-78dd6ddcc-cn2c5" Nov 28 13:38:27 crc kubenswrapper[4857]: I1128 13:38:27.935177 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2cds5\" (UniqueName: \"kubernetes.io/projected/1de1ce93-7a6d-4d58-90cd-03d94d4a1a32-kube-api-access-2cds5\") pod \"dnsmasq-dns-78dd6ddcc-cn2c5\" (UID: \"1de1ce93-7a6d-4d58-90cd-03d94d4a1a32\") " pod="openstack/dnsmasq-dns-78dd6ddcc-cn2c5" Nov 28 13:38:28 crc kubenswrapper[4857]: I1128 13:38:28.006634 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-hr8rg" Nov 28 13:38:28 crc kubenswrapper[4857]: I1128 13:38:28.059450 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-cn2c5" Nov 28 13:38:28 crc kubenswrapper[4857]: I1128 13:38:28.481617 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-hr8rg"] Nov 28 13:38:28 crc kubenswrapper[4857]: W1128 13:38:28.492238 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd865a2dc_946b_4a9c_814f_3406432d5a4e.slice/crio-19710d2efdec782295f35f4309e52bee137eafd7d9f52cab67080553ef2d9162 WatchSource:0}: Error finding container 19710d2efdec782295f35f4309e52bee137eafd7d9f52cab67080553ef2d9162: Status 404 returned error can't find the container with id 19710d2efdec782295f35f4309e52bee137eafd7d9f52cab67080553ef2d9162 Nov 28 13:38:28 crc kubenswrapper[4857]: I1128 13:38:28.558477 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-cn2c5"] Nov 28 13:38:28 crc kubenswrapper[4857]: W1128 13:38:28.565735 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1de1ce93_7a6d_4d58_90cd_03d94d4a1a32.slice/crio-1591c6d7df081583c97677facf438ae0ebb965ff44314a1f5003d13f8afd53e4 WatchSource:0}: Error finding container 1591c6d7df081583c97677facf438ae0ebb965ff44314a1f5003d13f8afd53e4: Status 404 returned error can't find the container with id 1591c6d7df081583c97677facf438ae0ebb965ff44314a1f5003d13f8afd53e4 Nov 28 13:38:29 crc kubenswrapper[4857]: I1128 13:38:29.499422 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-hr8rg" event={"ID":"d865a2dc-946b-4a9c-814f-3406432d5a4e","Type":"ContainerStarted","Data":"19710d2efdec782295f35f4309e52bee137eafd7d9f52cab67080553ef2d9162"} Nov 28 13:38:29 crc kubenswrapper[4857]: I1128 13:38:29.501303 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-cn2c5" event={"ID":"1de1ce93-7a6d-4d58-90cd-03d94d4a1a32","Type":"ContainerStarted","Data":"1591c6d7df081583c97677facf438ae0ebb965ff44314a1f5003d13f8afd53e4"} Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.322992 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-hr8rg"] Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.346681 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-zl8dt"] Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.348160 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-zl8dt" Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.360521 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwmgj\" (UniqueName: \"kubernetes.io/projected/5fbb4ae3-ae41-4be2-8f32-862531ae0737-kube-api-access-hwmgj\") pod \"dnsmasq-dns-666b6646f7-zl8dt\" (UID: \"5fbb4ae3-ae41-4be2-8f32-862531ae0737\") " pod="openstack/dnsmasq-dns-666b6646f7-zl8dt" Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.360564 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5fbb4ae3-ae41-4be2-8f32-862531ae0737-dns-svc\") pod \"dnsmasq-dns-666b6646f7-zl8dt\" (UID: \"5fbb4ae3-ae41-4be2-8f32-862531ae0737\") " pod="openstack/dnsmasq-dns-666b6646f7-zl8dt" Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.360628 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5fbb4ae3-ae41-4be2-8f32-862531ae0737-config\") pod \"dnsmasq-dns-666b6646f7-zl8dt\" (UID: \"5fbb4ae3-ae41-4be2-8f32-862531ae0737\") " pod="openstack/dnsmasq-dns-666b6646f7-zl8dt" Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.360639 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-zl8dt"] Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.463427 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5fbb4ae3-ae41-4be2-8f32-862531ae0737-config\") pod \"dnsmasq-dns-666b6646f7-zl8dt\" (UID: \"5fbb4ae3-ae41-4be2-8f32-862531ae0737\") " pod="openstack/dnsmasq-dns-666b6646f7-zl8dt" Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.463541 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwmgj\" (UniqueName: \"kubernetes.io/projected/5fbb4ae3-ae41-4be2-8f32-862531ae0737-kube-api-access-hwmgj\") pod \"dnsmasq-dns-666b6646f7-zl8dt\" (UID: \"5fbb4ae3-ae41-4be2-8f32-862531ae0737\") " pod="openstack/dnsmasq-dns-666b6646f7-zl8dt" Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.463569 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5fbb4ae3-ae41-4be2-8f32-862531ae0737-dns-svc\") pod \"dnsmasq-dns-666b6646f7-zl8dt\" (UID: \"5fbb4ae3-ae41-4be2-8f32-862531ae0737\") " pod="openstack/dnsmasq-dns-666b6646f7-zl8dt" Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.464420 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5fbb4ae3-ae41-4be2-8f32-862531ae0737-dns-svc\") pod \"dnsmasq-dns-666b6646f7-zl8dt\" (UID: \"5fbb4ae3-ae41-4be2-8f32-862531ae0737\") " pod="openstack/dnsmasq-dns-666b6646f7-zl8dt" Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.464558 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5fbb4ae3-ae41-4be2-8f32-862531ae0737-config\") pod \"dnsmasq-dns-666b6646f7-zl8dt\" (UID: \"5fbb4ae3-ae41-4be2-8f32-862531ae0737\") " pod="openstack/dnsmasq-dns-666b6646f7-zl8dt" Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.502241 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwmgj\" (UniqueName: 
\"kubernetes.io/projected/5fbb4ae3-ae41-4be2-8f32-862531ae0737-kube-api-access-hwmgj\") pod \"dnsmasq-dns-666b6646f7-zl8dt\" (UID: \"5fbb4ae3-ae41-4be2-8f32-862531ae0737\") " pod="openstack/dnsmasq-dns-666b6646f7-zl8dt" Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.672458 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-zl8dt" Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.829728 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-cn2c5"] Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.843341 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-drj4p"] Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.844486 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-drj4p" Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.872153 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/861147df-48f1-4c7d-b05e-a3eddec517e1-config\") pod \"dnsmasq-dns-57d769cc4f-drj4p\" (UID: \"861147df-48f1-4c7d-b05e-a3eddec517e1\") " pod="openstack/dnsmasq-dns-57d769cc4f-drj4p" Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.872189 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/861147df-48f1-4c7d-b05e-a3eddec517e1-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-drj4p\" (UID: \"861147df-48f1-4c7d-b05e-a3eddec517e1\") " pod="openstack/dnsmasq-dns-57d769cc4f-drj4p" Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.872246 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fx64m\" (UniqueName: \"kubernetes.io/projected/861147df-48f1-4c7d-b05e-a3eddec517e1-kube-api-access-fx64m\") pod \"dnsmasq-dns-57d769cc4f-drj4p\" (UID: \"861147df-48f1-4c7d-b05e-a3eddec517e1\") " pod="openstack/dnsmasq-dns-57d769cc4f-drj4p" Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.886015 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-drj4p"] Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.973622 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fx64m\" (UniqueName: \"kubernetes.io/projected/861147df-48f1-4c7d-b05e-a3eddec517e1-kube-api-access-fx64m\") pod \"dnsmasq-dns-57d769cc4f-drj4p\" (UID: \"861147df-48f1-4c7d-b05e-a3eddec517e1\") " pod="openstack/dnsmasq-dns-57d769cc4f-drj4p" Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.975003 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/861147df-48f1-4c7d-b05e-a3eddec517e1-config\") pod \"dnsmasq-dns-57d769cc4f-drj4p\" (UID: \"861147df-48f1-4c7d-b05e-a3eddec517e1\") " pod="openstack/dnsmasq-dns-57d769cc4f-drj4p" Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.975051 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/861147df-48f1-4c7d-b05e-a3eddec517e1-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-drj4p\" (UID: \"861147df-48f1-4c7d-b05e-a3eddec517e1\") " pod="openstack/dnsmasq-dns-57d769cc4f-drj4p" Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.975968 4857 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/861147df-48f1-4c7d-b05e-a3eddec517e1-config\") pod \"dnsmasq-dns-57d769cc4f-drj4p\" (UID: \"861147df-48f1-4c7d-b05e-a3eddec517e1\") " pod="openstack/dnsmasq-dns-57d769cc4f-drj4p" Nov 28 13:38:30 crc kubenswrapper[4857]: I1128 13:38:30.976114 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/861147df-48f1-4c7d-b05e-a3eddec517e1-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-drj4p\" (UID: \"861147df-48f1-4c7d-b05e-a3eddec517e1\") " pod="openstack/dnsmasq-dns-57d769cc4f-drj4p" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.036285 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fx64m\" (UniqueName: \"kubernetes.io/projected/861147df-48f1-4c7d-b05e-a3eddec517e1-kube-api-access-fx64m\") pod \"dnsmasq-dns-57d769cc4f-drj4p\" (UID: \"861147df-48f1-4c7d-b05e-a3eddec517e1\") " pod="openstack/dnsmasq-dns-57d769cc4f-drj4p" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.188442 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-drj4p" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.384544 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-zl8dt"] Nov 28 13:38:31 crc kubenswrapper[4857]: W1128 13:38:31.414376 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5fbb4ae3_ae41_4be2_8f32_862531ae0737.slice/crio-7964d472251e0feeb17fb531c0cdc66538389bff12f12aaf5912c99ce7f0bbf2 WatchSource:0}: Error finding container 7964d472251e0feeb17fb531c0cdc66538389bff12f12aaf5912c99ce7f0bbf2: Status 404 returned error can't find the container with id 7964d472251e0feeb17fb531c0cdc66538389bff12f12aaf5912c99ce7f0bbf2 Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.534676 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.538601 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-zl8dt" event={"ID":"5fbb4ae3-ae41-4be2-8f32-862531ae0737","Type":"ContainerStarted","Data":"7964d472251e0feeb17fb531c0cdc66538389bff12f12aaf5912c99ce7f0bbf2"} Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.538692 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.543360 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.543638 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.543752 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.543864 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.543963 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.544070 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-lb6l5" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.544610 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.556545 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.588122 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.588173 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.588205 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.588226 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-config-data\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.588244 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.588269 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.588295 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.588310 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-server-conf\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.588331 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.588357 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-pod-info\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.588377 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dfc8\" (UniqueName: \"kubernetes.io/projected/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-kube-api-access-4dfc8\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.720597 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.720662 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-server-conf\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.720745 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.728553 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-server-conf\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " 
pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.728778 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-pod-info\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.728838 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dfc8\" (UniqueName: \"kubernetes.io/projected/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-kube-api-access-4dfc8\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.728910 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.729074 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.729136 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.729282 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.729369 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-config-data\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.729393 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.729455 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.729812 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.730425 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.731151 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.731729 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-config-data\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.740044 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.740719 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.763514 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dfc8\" (UniqueName: \"kubernetes.io/projected/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-kube-api-access-4dfc8\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.764544 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.777416 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-pod-info\") pod \"rabbitmq-server-0\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.784279 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-drj4p"] Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.815948 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: 
\"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " pod="openstack/rabbitmq-server-0" Nov 28 13:38:31 crc kubenswrapper[4857]: I1128 13:38:31.867158 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.040263 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.051826 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.055639 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-tf9r6" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.056029 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.065310 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.070093 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.072946 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.077351 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.077608 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.079975 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.143946 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.143999 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.144069 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.144098 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/71cc1f00-1a63-428e-8f12-2136ab077860-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 
13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.144139 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.144179 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljgc2\" (UniqueName: \"kubernetes.io/projected/71cc1f00-1a63-428e-8f12-2136ab077860-kube-api-access-ljgc2\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.144225 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/71cc1f00-1a63-428e-8f12-2136ab077860-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.144257 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.144285 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.144321 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.144365 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.247614 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljgc2\" (UniqueName: \"kubernetes.io/projected/71cc1f00-1a63-428e-8f12-2136ab077860-kube-api-access-ljgc2\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.247693 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/71cc1f00-1a63-428e-8f12-2136ab077860-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " 
pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.247733 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.247786 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.247826 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.247874 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.247934 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.247961 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.248010 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.248031 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/71cc1f00-1a63-428e-8f12-2136ab077860-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.248062 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.248402 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for 
volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.249556 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.250653 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.251589 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.252032 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.252179 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.260785 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.261482 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/71cc1f00-1a63-428e-8f12-2136ab077860-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.261668 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/71cc1f00-1a63-428e-8f12-2136ab077860-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.273758 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.277325 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljgc2\" (UniqueName: \"kubernetes.io/projected/71cc1f00-1a63-428e-8f12-2136ab077860-kube-api-access-ljgc2\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.342323 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.435189 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.571549 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-drj4p" event={"ID":"861147df-48f1-4c7d-b05e-a3eddec517e1","Type":"ContainerStarted","Data":"9c246fb058f6394a09a2e3ebb1c818bb3dff31325fb600b2d99c8c2461682206"} Nov 28 13:38:32 crc kubenswrapper[4857]: I1128 13:38:32.607113 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.075495 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 13:38:33 crc kubenswrapper[4857]: W1128 13:38:33.117731 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71cc1f00_1a63_428e_8f12_2136ab077860.slice/crio-411e2fc221cd1d35da69d2b8483e64f10e30a3df064542e60d4be06817567a1c WatchSource:0}: Error finding container 411e2fc221cd1d35da69d2b8483e64f10e30a3df064542e60d4be06817567a1c: Status 404 returned error can't find the container with id 411e2fc221cd1d35da69d2b8483e64f10e30a3df064542e60d4be06817567a1c Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.223290 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.225489 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.228222 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-nwvzk" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.228700 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.228785 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.240966 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.241617 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.248575 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.378387 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.378457 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-kolla-config\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.378568 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-config-data-default\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.378598 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.378625 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.378652 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.378675 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.378712 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8f9h8\" (UniqueName: \"kubernetes.io/projected/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-kube-api-access-8f9h8\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.485019 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-config-data-default\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.485089 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.485111 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.485139 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.485156 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.485179 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8f9h8\" (UniqueName: \"kubernetes.io/projected/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-kube-api-access-8f9h8\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.485235 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.485253 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-kolla-config\") pod \"openstack-galera-0\" (UID: 
\"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.489596 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.490167 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-config-data-default\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.490790 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-kolla-config\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.491193 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.492444 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.510918 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.512055 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8f9h8\" (UniqueName: \"kubernetes.io/projected/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-kube-api-access-8f9h8\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.521634 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.589873 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"cfbd0457-d459-4bf2-bdaf-8b61db5cce65","Type":"ContainerStarted","Data":"d7bbbe16f23e34069c8fd52b92fbd34354c6eadcec7ef8b0cb7aeefa5a813137"} Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.592823 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" 
event={"ID":"71cc1f00-1a63-428e-8f12-2136ab077860","Type":"ContainerStarted","Data":"411e2fc221cd1d35da69d2b8483e64f10e30a3df064542e60d4be06817567a1c"} Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.609625 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " pod="openstack/openstack-galera-0" Nov 28 13:38:33 crc kubenswrapper[4857]: I1128 13:38:33.904408 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.476898 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.479010 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.488709 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.490432 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-67pkr" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.491554 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.492624 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.526890 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.632568 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/41687469-06d7-47ab-ad25-d32df165e1e2-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.633045 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41687469-06d7-47ab-ad25-d32df165e1e2-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.633069 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/41687469-06d7-47ab-ad25-d32df165e1e2-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.633105 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-99tbc\" (UniqueName: \"kubernetes.io/projected/41687469-06d7-47ab-ad25-d32df165e1e2-kube-api-access-99tbc\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 
13:38:34.633173 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/41687469-06d7-47ab-ad25-d32df165e1e2-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.633470 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.633604 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41687469-06d7-47ab-ad25-d32df165e1e2-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.644103 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/41687469-06d7-47ab-ad25-d32df165e1e2-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.708437 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.710314 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.717596 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-flfdk" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.717963 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.718100 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.718730 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.736374 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.746189 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.746253 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41687469-06d7-47ab-ad25-d32df165e1e2-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.746288 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/41687469-06d7-47ab-ad25-d32df165e1e2-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.746343 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/41687469-06d7-47ab-ad25-d32df165e1e2-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.746361 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41687469-06d7-47ab-ad25-d32df165e1e2-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.746380 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/41687469-06d7-47ab-ad25-d32df165e1e2-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.746409 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-99tbc\" (UniqueName: \"kubernetes.io/projected/41687469-06d7-47ab-ad25-d32df165e1e2-kube-api-access-99tbc\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" 
Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.746474 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/41687469-06d7-47ab-ad25-d32df165e1e2-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.747991 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.748067 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/41687469-06d7-47ab-ad25-d32df165e1e2-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.748834 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/41687469-06d7-47ab-ad25-d32df165e1e2-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.748984 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/41687469-06d7-47ab-ad25-d32df165e1e2-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.750340 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41687469-06d7-47ab-ad25-d32df165e1e2-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.763790 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/41687469-06d7-47ab-ad25-d32df165e1e2-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.779501 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41687469-06d7-47ab-ad25-d32df165e1e2-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.781947 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-99tbc\" (UniqueName: \"kubernetes.io/projected/41687469-06d7-47ab-ad25-d32df165e1e2-kube-api-access-99tbc\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.823923 4857 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.842888 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.847400 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/30a2b522-ef43-4b0a-8215-2bb928744e00-kolla-config\") pod \"memcached-0\" (UID: \"30a2b522-ef43-4b0a-8215-2bb928744e00\") " pod="openstack/memcached-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.847452 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pskmk\" (UniqueName: \"kubernetes.io/projected/30a2b522-ef43-4b0a-8215-2bb928744e00-kube-api-access-pskmk\") pod \"memcached-0\" (UID: \"30a2b522-ef43-4b0a-8215-2bb928744e00\") " pod="openstack/memcached-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.847487 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30a2b522-ef43-4b0a-8215-2bb928744e00-combined-ca-bundle\") pod \"memcached-0\" (UID: \"30a2b522-ef43-4b0a-8215-2bb928744e00\") " pod="openstack/memcached-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.847514 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/30a2b522-ef43-4b0a-8215-2bb928744e00-config-data\") pod \"memcached-0\" (UID: \"30a2b522-ef43-4b0a-8215-2bb928744e00\") " pod="openstack/memcached-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.847551 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/30a2b522-ef43-4b0a-8215-2bb928744e00-memcached-tls-certs\") pod \"memcached-0\" (UID: \"30a2b522-ef43-4b0a-8215-2bb928744e00\") " pod="openstack/memcached-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.948680 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/30a2b522-ef43-4b0a-8215-2bb928744e00-kolla-config\") pod \"memcached-0\" (UID: \"30a2b522-ef43-4b0a-8215-2bb928744e00\") " pod="openstack/memcached-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.948719 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pskmk\" (UniqueName: \"kubernetes.io/projected/30a2b522-ef43-4b0a-8215-2bb928744e00-kube-api-access-pskmk\") pod \"memcached-0\" (UID: \"30a2b522-ef43-4b0a-8215-2bb928744e00\") " pod="openstack/memcached-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.948772 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30a2b522-ef43-4b0a-8215-2bb928744e00-combined-ca-bundle\") pod \"memcached-0\" (UID: \"30a2b522-ef43-4b0a-8215-2bb928744e00\") " pod="openstack/memcached-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.948797 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/30a2b522-ef43-4b0a-8215-2bb928744e00-config-data\") pod \"memcached-0\" (UID: \"30a2b522-ef43-4b0a-8215-2bb928744e00\") " pod="openstack/memcached-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.948836 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/30a2b522-ef43-4b0a-8215-2bb928744e00-memcached-tls-certs\") pod \"memcached-0\" (UID: \"30a2b522-ef43-4b0a-8215-2bb928744e00\") " pod="openstack/memcached-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.949682 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/30a2b522-ef43-4b0a-8215-2bb928744e00-kolla-config\") pod \"memcached-0\" (UID: \"30a2b522-ef43-4b0a-8215-2bb928744e00\") " pod="openstack/memcached-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.954986 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/30a2b522-ef43-4b0a-8215-2bb928744e00-memcached-tls-certs\") pod \"memcached-0\" (UID: \"30a2b522-ef43-4b0a-8215-2bb928744e00\") " pod="openstack/memcached-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.955164 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/30a2b522-ef43-4b0a-8215-2bb928744e00-config-data\") pod \"memcached-0\" (UID: \"30a2b522-ef43-4b0a-8215-2bb928744e00\") " pod="openstack/memcached-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.962377 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30a2b522-ef43-4b0a-8215-2bb928744e00-combined-ca-bundle\") pod \"memcached-0\" (UID: \"30a2b522-ef43-4b0a-8215-2bb928744e00\") " pod="openstack/memcached-0" Nov 28 13:38:34 crc kubenswrapper[4857]: I1128 13:38:34.976607 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pskmk\" (UniqueName: \"kubernetes.io/projected/30a2b522-ef43-4b0a-8215-2bb928744e00-kube-api-access-pskmk\") pod \"memcached-0\" (UID: \"30a2b522-ef43-4b0a-8215-2bb928744e00\") " pod="openstack/memcached-0" Nov 28 13:38:35 crc kubenswrapper[4857]: I1128 13:38:35.053908 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 28 13:38:35 crc kubenswrapper[4857]: I1128 13:38:35.488644 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 13:38:35 crc kubenswrapper[4857]: W1128 13:38:35.533870 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod41687469_06d7_47ab_ad25_d32df165e1e2.slice/crio-273deb67415158afe19f6d7a4cc4025be96b58873f3ec2c41104f83ef6c13125 WatchSource:0}: Error finding container 273deb67415158afe19f6d7a4cc4025be96b58873f3ec2c41104f83ef6c13125: Status 404 returned error can't find the container with id 273deb67415158afe19f6d7a4cc4025be96b58873f3ec2c41104f83ef6c13125 Nov 28 13:38:35 crc kubenswrapper[4857]: I1128 13:38:35.639440 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7b0c1834-7ece-4d9c-9cf1-28a53aea280e","Type":"ContainerStarted","Data":"47b74021a1fe53b7d626c8a8aae43a33be047cff60be391678a65a393e3ab844"} Nov 28 13:38:35 crc kubenswrapper[4857]: I1128 13:38:35.643680 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"41687469-06d7-47ab-ad25-d32df165e1e2","Type":"ContainerStarted","Data":"273deb67415158afe19f6d7a4cc4025be96b58873f3ec2c41104f83ef6c13125"} Nov 28 13:38:35 crc kubenswrapper[4857]: I1128 13:38:35.665358 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 28 13:38:35 crc kubenswrapper[4857]: W1128 13:38:35.673998 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod30a2b522_ef43_4b0a_8215_2bb928744e00.slice/crio-dc6b682e84c6ca942b35366616802230819d0a851390b3d3f3d081955ffc7ea0 WatchSource:0}: Error finding container dc6b682e84c6ca942b35366616802230819d0a851390b3d3f3d081955ffc7ea0: Status 404 returned error can't find the container with id dc6b682e84c6ca942b35366616802230819d0a851390b3d3f3d081955ffc7ea0 Nov 28 13:38:36 crc kubenswrapper[4857]: I1128 13:38:36.564269 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 13:38:36 crc kubenswrapper[4857]: I1128 13:38:36.565806 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 13:38:36 crc kubenswrapper[4857]: I1128 13:38:36.572907 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-8drw6" Nov 28 13:38:36 crc kubenswrapper[4857]: I1128 13:38:36.582291 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 13:38:36 crc kubenswrapper[4857]: I1128 13:38:36.672425 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"30a2b522-ef43-4b0a-8215-2bb928744e00","Type":"ContainerStarted","Data":"dc6b682e84c6ca942b35366616802230819d0a851390b3d3f3d081955ffc7ea0"} Nov 28 13:38:36 crc kubenswrapper[4857]: I1128 13:38:36.678343 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvtbt\" (UniqueName: \"kubernetes.io/projected/db66469a-ca4a-4f4b-b657-70bf41cd45db-kube-api-access-bvtbt\") pod \"kube-state-metrics-0\" (UID: \"db66469a-ca4a-4f4b-b657-70bf41cd45db\") " pod="openstack/kube-state-metrics-0" Nov 28 13:38:36 crc kubenswrapper[4857]: I1128 13:38:36.782119 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvtbt\" (UniqueName: \"kubernetes.io/projected/db66469a-ca4a-4f4b-b657-70bf41cd45db-kube-api-access-bvtbt\") pod \"kube-state-metrics-0\" (UID: \"db66469a-ca4a-4f4b-b657-70bf41cd45db\") " pod="openstack/kube-state-metrics-0" Nov 28 13:38:36 crc kubenswrapper[4857]: I1128 13:38:36.815402 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvtbt\" (UniqueName: \"kubernetes.io/projected/db66469a-ca4a-4f4b-b657-70bf41cd45db-kube-api-access-bvtbt\") pod \"kube-state-metrics-0\" (UID: \"db66469a-ca4a-4f4b-b657-70bf41cd45db\") " pod="openstack/kube-state-metrics-0" Nov 28 13:38:36 crc kubenswrapper[4857]: I1128 13:38:36.915723 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 13:38:37 crc kubenswrapper[4857]: I1128 13:38:37.692778 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 13:38:38 crc kubenswrapper[4857]: I1128 13:38:38.716107 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"db66469a-ca4a-4f4b-b657-70bf41cd45db","Type":"ContainerStarted","Data":"8e12486fb2e5fb082001f8616760880207aa13f08d8847c07943bbb3e886edb5"} Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.081105 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-t99ql"] Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.110538 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-t99ql" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.116242 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.116988 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.134667 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-t99ql"] Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.145524 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-4cg2w" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.174054 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-ph2cf"] Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.176387 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.189333 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-ph2cf"] Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.222212 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.224299 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.228370 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-txk5k" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.228875 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.230257 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.230330 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.230379 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.243551 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.305099 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1f7e362-6e6b-4636-b551-4533ad037811-ovn-controller-tls-certs\") pod \"ovn-controller-t99ql\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " pod="openstack/ovn-controller-t99ql" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.305137 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2l8j\" (UniqueName: \"kubernetes.io/projected/b1f7e362-6e6b-4636-b551-4533ad037811-kube-api-access-q2l8j\") pod \"ovn-controller-t99ql\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " pod="openstack/ovn-controller-t99ql" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.305177 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-86j52\" (UniqueName: \"kubernetes.io/projected/c80a8609-29af-4833-856c-ee4094abcc0c-kube-api-access-86j52\") pod \"ovn-controller-ovs-ph2cf\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.305200 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-var-lib\") pod \"ovn-controller-ovs-ph2cf\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.305227 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b1f7e362-6e6b-4636-b551-4533ad037811-var-run\") pod \"ovn-controller-t99ql\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " pod="openstack/ovn-controller-t99ql" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.305248 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-var-log\") pod \"ovn-controller-ovs-ph2cf\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.305264 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b1f7e362-6e6b-4636-b551-4533ad037811-var-log-ovn\") pod \"ovn-controller-t99ql\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " pod="openstack/ovn-controller-t99ql" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.305308 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-var-run\") pod \"ovn-controller-ovs-ph2cf\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.305327 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b1f7e362-6e6b-4636-b551-4533ad037811-scripts\") pod \"ovn-controller-t99ql\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " pod="openstack/ovn-controller-t99ql" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.305346 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b1f7e362-6e6b-4636-b551-4533ad037811-var-run-ovn\") pod \"ovn-controller-t99ql\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " pod="openstack/ovn-controller-t99ql" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.305360 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1f7e362-6e6b-4636-b551-4533ad037811-combined-ca-bundle\") pod \"ovn-controller-t99ql\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " pod="openstack/ovn-controller-t99ql" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.305380 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: 
\"kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-etc-ovs\") pod \"ovn-controller-ovs-ph2cf\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.305419 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c80a8609-29af-4833-856c-ee4094abcc0c-scripts\") pod \"ovn-controller-ovs-ph2cf\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.412587 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b1f7e362-6e6b-4636-b551-4533ad037811-scripts\") pod \"ovn-controller-t99ql\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " pod="openstack/ovn-controller-t99ql" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.412662 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b1f7e362-6e6b-4636-b551-4533ad037811-var-run-ovn\") pod \"ovn-controller-t99ql\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " pod="openstack/ovn-controller-t99ql" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.412682 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1f7e362-6e6b-4636-b551-4533ad037811-combined-ca-bundle\") pod \"ovn-controller-t99ql\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " pod="openstack/ovn-controller-t99ql" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.412706 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-etc-ovs\") pod \"ovn-controller-ovs-ph2cf\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.412721 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c80a8609-29af-4833-856c-ee4094abcc0c-scripts\") pod \"ovn-controller-ovs-ph2cf\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.417392 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.417468 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1f7e362-6e6b-4636-b551-4533ad037811-ovn-controller-tls-certs\") pod \"ovn-controller-t99ql\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " pod="openstack/ovn-controller-t99ql" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.417526 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-var-lib\") pod \"ovn-controller-ovs-ph2cf\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " 
pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.417548 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.417678 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-var-log\") pod \"ovn-controller-ovs-ph2cf\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.417718 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-var-run\") pod \"ovn-controller-ovs-ph2cf\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.415810 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b1f7e362-6e6b-4636-b551-4533ad037811-scripts\") pod \"ovn-controller-t99ql\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " pod="openstack/ovn-controller-t99ql" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.417293 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c80a8609-29af-4833-856c-ee4094abcc0c-scripts\") pod \"ovn-controller-ovs-ph2cf\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.413299 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-etc-ovs\") pod \"ovn-controller-ovs-ph2cf\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.413461 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b1f7e362-6e6b-4636-b551-4533ad037811-var-run-ovn\") pod \"ovn-controller-t99ql\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " pod="openstack/ovn-controller-t99ql" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.417749 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.418230 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-var-lib\") pod \"ovn-controller-ovs-ph2cf\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.418272 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: 
\"kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-var-run\") pod \"ovn-controller-ovs-ph2cf\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.418486 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-var-log\") pod \"ovn-controller-ovs-ph2cf\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.418511 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.418677 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2l8j\" (UniqueName: \"kubernetes.io/projected/b1f7e362-6e6b-4636-b551-4533ad037811-kube-api-access-q2l8j\") pod \"ovn-controller-t99ql\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " pod="openstack/ovn-controller-t99ql" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.418706 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.418905 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.419371 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86j52\" (UniqueName: \"kubernetes.io/projected/c80a8609-29af-4833-856c-ee4094abcc0c-kube-api-access-86j52\") pod \"ovn-controller-ovs-ph2cf\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.419706 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-config\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.419750 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b1f7e362-6e6b-4636-b551-4533ad037811-var-run\") pod \"ovn-controller-t99ql\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " pod="openstack/ovn-controller-t99ql" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.419809 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b1f7e362-6e6b-4636-b551-4533ad037811-var-log-ovn\") pod \"ovn-controller-t99ql\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " pod="openstack/ovn-controller-t99ql" Nov 28 
13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.419857 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b1f7e362-6e6b-4636-b551-4533ad037811-var-run\") pod \"ovn-controller-t99ql\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " pod="openstack/ovn-controller-t99ql" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.420064 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b1f7e362-6e6b-4636-b551-4533ad037811-var-log-ovn\") pod \"ovn-controller-t99ql\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " pod="openstack/ovn-controller-t99ql" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.420170 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwgcx\" (UniqueName: \"kubernetes.io/projected/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-kube-api-access-rwgcx\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.443459 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1f7e362-6e6b-4636-b551-4533ad037811-ovn-controller-tls-certs\") pod \"ovn-controller-t99ql\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " pod="openstack/ovn-controller-t99ql" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.443809 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1f7e362-6e6b-4636-b551-4533ad037811-combined-ca-bundle\") pod \"ovn-controller-t99ql\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " pod="openstack/ovn-controller-t99ql" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.470026 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86j52\" (UniqueName: \"kubernetes.io/projected/c80a8609-29af-4833-856c-ee4094abcc0c-kube-api-access-86j52\") pod \"ovn-controller-ovs-ph2cf\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.482379 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2l8j\" (UniqueName: \"kubernetes.io/projected/b1f7e362-6e6b-4636-b551-4533ad037811-kube-api-access-q2l8j\") pod \"ovn-controller-t99ql\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " pod="openstack/ovn-controller-t99ql" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.512296 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.521705 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.521794 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.521831 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-config\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.521871 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwgcx\" (UniqueName: \"kubernetes.io/projected/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-kube-api-access-rwgcx\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.521912 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.521968 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.522014 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.522036 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.522202 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.522961 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-config\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.522995 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.523373 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.532806 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.533021 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.533977 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.555967 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.556902 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwgcx\" (UniqueName: \"kubernetes.io/projected/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-kube-api-access-rwgcx\") pod \"ovsdbserver-nb-0\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.761105 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-t99ql" Nov 28 13:38:40 crc kubenswrapper[4857]: I1128 13:38:40.845393 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.082638 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.086234 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.091547 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-nrggz" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.091599 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.091601 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.091661 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.107691 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.196108 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.196171 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4qlz\" (UniqueName: \"kubernetes.io/projected/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-kube-api-access-v4qlz\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.196242 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.196370 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-config\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.196481 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.196511 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.196545 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " 
pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.196566 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.298219 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.298312 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-config\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.298387 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.298433 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.298501 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.298533 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.298584 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.298624 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4qlz\" (UniqueName: \"kubernetes.io/projected/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-kube-api-access-v4qlz\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.299854 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.301228 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-config\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.301246 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.301237 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.306706 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.306967 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.307909 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.334146 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.348512 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4qlz\" (UniqueName: \"kubernetes.io/projected/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-kube-api-access-v4qlz\") pod \"ovsdbserver-sb-0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:44 crc kubenswrapper[4857]: I1128 13:38:44.406193 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 13:38:58 crc kubenswrapper[4857]: E1128 13:38:58.092602 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-memcached:current-podified" Nov 28 13:38:58 crc kubenswrapper[4857]: E1128 13:38:58.093305 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:memcached,Image:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,Command:[/usr/bin/dumb-init -- /usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:n669h585h5b7h5c4h655h95h646h667hbch649h5c4h5d8h9ch66fh547hdbh7dh5b9hb9h5d5h674h9dh664h89h68hdchd8h565h68ch8dh5b9h5bbq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pskmk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in 
pod memcached-0_openstack(30a2b522-ef43-4b0a-8215-2bb928744e00): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:38:58 crc kubenswrapper[4857]: E1128 13:38:58.094453 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/memcached-0" podUID="30a2b522-ef43-4b0a-8215-2bb928744e00" Nov 28 13:38:58 crc kubenswrapper[4857]: E1128 13:38:58.105640 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 28 13:38:58 crc kubenswrapper[4857]: E1128 13:38:58.105788 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4dfc8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
rabbitmq-server-0_openstack(cfbd0457-d459-4bf2-bdaf-8b61db5cce65): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:38:58 crc kubenswrapper[4857]: E1128 13:38:58.107042 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="cfbd0457-d459-4bf2-bdaf-8b61db5cce65" Nov 28 13:38:58 crc kubenswrapper[4857]: E1128 13:38:58.358438 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-memcached:current-podified\\\"\"" pod="openstack/memcached-0" podUID="30a2b522-ef43-4b0a-8215-2bb928744e00" Nov 28 13:38:58 crc kubenswrapper[4857]: E1128 13:38:58.359529 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="cfbd0457-d459-4bf2-bdaf-8b61db5cce65" Nov 28 13:38:59 crc kubenswrapper[4857]: E1128 13:38:59.956420 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Nov 28 13:38:59 crc kubenswrapper[4857]: E1128 13:38:59.956639 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8f9h8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(7b0c1834-7ece-4d9c-9cf1-28a53aea280e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:38:59 crc kubenswrapper[4857]: E1128 13:38:59.957851 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="7b0c1834-7ece-4d9c-9cf1-28a53aea280e" Nov 28 13:39:00 crc kubenswrapper[4857]: E1128 13:39:00.011900 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Nov 28 13:39:00 crc kubenswrapper[4857]: E1128 13:39:00.012094 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-99tbc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(41687469-06d7-47ab-ad25-d32df165e1e2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:39:00 crc kubenswrapper[4857]: E1128 13:39:00.013994 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="41687469-06d7-47ab-ad25-d32df165e1e2" Nov 28 13:39:00 crc kubenswrapper[4857]: E1128 13:39:00.371124 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="7b0c1834-7ece-4d9c-9cf1-28a53aea280e" Nov 28 13:39:00 crc kubenswrapper[4857]: E1128 13:39:00.371151 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-cell1-galera-0" podUID="41687469-06d7-47ab-ad25-d32df165e1e2" Nov 28 13:39:04 crc kubenswrapper[4857]: E1128 13:39:04.391340 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 13:39:04 crc 
kubenswrapper[4857]: E1128 13:39:04.392202 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hwmgj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-zl8dt_openstack(5fbb4ae3-ae41-4be2-8f32-862531ae0737): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:39:04 crc kubenswrapper[4857]: E1128 13:39:04.394035 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-zl8dt" podUID="5fbb4ae3-ae41-4be2-8f32-862531ae0737" Nov 28 13:39:04 crc kubenswrapper[4857]: E1128 13:39:04.409991 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-666b6646f7-zl8dt" podUID="5fbb4ae3-ae41-4be2-8f32-862531ae0737" Nov 28 13:39:04 crc kubenswrapper[4857]: E1128 13:39:04.423056 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 13:39:04 crc kubenswrapper[4857]: E1128 13:39:04.423211 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fx64m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-drj4p_openstack(861147df-48f1-4c7d-b05e-a3eddec517e1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:39:04 crc kubenswrapper[4857]: E1128 13:39:04.424637 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-drj4p" podUID="861147df-48f1-4c7d-b05e-a3eddec517e1" Nov 28 13:39:04 crc kubenswrapper[4857]: E1128 13:39:04.690965 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 13:39:04 crc kubenswrapper[4857]: E1128 13:39:04.691465 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-52w2j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-hr8rg_openstack(d865a2dc-946b-4a9c-814f-3406432d5a4e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:39:04 crc kubenswrapper[4857]: E1128 13:39:04.692885 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-hr8rg" podUID="d865a2dc-946b-4a9c-814f-3406432d5a4e" Nov 28 13:39:04 crc kubenswrapper[4857]: E1128 13:39:04.702790 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 13:39:04 crc kubenswrapper[4857]: E1128 13:39:04.702980 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2cds5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-cn2c5_openstack(1de1ce93-7a6d-4d58-90cd-03d94d4a1a32): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:39:04 crc kubenswrapper[4857]: E1128 13:39:04.704193 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-cn2c5" podUID="1de1ce93-7a6d-4d58-90cd-03d94d4a1a32" Nov 28 13:39:04 crc kubenswrapper[4857]: I1128 13:39:04.771662 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-t99ql"] Nov 28 13:39:05 crc kubenswrapper[4857]: E1128 13:39:05.203490 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:9aee425378d2c16cd44177dc54a274b312897f5860a8e78fdfda555a0d79dd71: Get \"https://registry.k8s.io/v2/kube-state-metrics/kube-state-metrics/blobs/sha256:9aee425378d2c16cd44177dc54a274b312897f5860a8e78fdfda555a0d79dd71\": context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Nov 28 13:39:05 crc kubenswrapper[4857]: E1128 13:39:05.203920 4857 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:9aee425378d2c16cd44177dc54a274b312897f5860a8e78fdfda555a0d79dd71: Get \"https://registry.k8s.io/v2/kube-state-metrics/kube-state-metrics/blobs/sha256:9aee425378d2c16cd44177dc54a274b312897f5860a8e78fdfda555a0d79dd71\": context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Nov 28 13:39:05 crc kubenswrapper[4857]: E1128 13:39:05.204096 4857 kuberuntime_manager.go:1274] "Unhandled 
Error" err="container &Container{Name:kube-state-metrics,Image:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,Command:[],Args:[--resources=pods --namespaces=openstack],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:telemetry,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bvtbt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod kube-state-metrics-0_openstack(db66469a-ca4a-4f4b-b657-70bf41cd45db): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:9aee425378d2c16cd44177dc54a274b312897f5860a8e78fdfda555a0d79dd71: Get \"https://registry.k8s.io/v2/kube-state-metrics/kube-state-metrics/blobs/sha256:9aee425378d2c16cd44177dc54a274b312897f5860a8e78fdfda555a0d79dd71\": context canceled" logger="UnhandledError" Nov 28 13:39:05 crc kubenswrapper[4857]: E1128 13:39:05.205407 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:9aee425378d2c16cd44177dc54a274b312897f5860a8e78fdfda555a0d79dd71: Get \\\"https://registry.k8s.io/v2/kube-state-metrics/kube-state-metrics/blobs/sha256:9aee425378d2c16cd44177dc54a274b312897f5860a8e78fdfda555a0d79dd71\\\": context canceled\"" pod="openstack/kube-state-metrics-0" podUID="db66469a-ca4a-4f4b-b657-70bf41cd45db" Nov 28 13:39:05 crc kubenswrapper[4857]: I1128 13:39:05.216753 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 13:39:05 crc kubenswrapper[4857]: W1128 13:39:05.324620 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc80a8609_29af_4833_856c_ee4094abcc0c.slice/crio-990556a080bb034b9f42137b7735b852486767ddf9a79c21d5d0d82ecc2e190d WatchSource:0}: Error finding container 
990556a080bb034b9f42137b7735b852486767ddf9a79c21d5d0d82ecc2e190d: Status 404 returned error can't find the container with id 990556a080bb034b9f42137b7735b852486767ddf9a79c21d5d0d82ecc2e190d Nov 28 13:39:05 crc kubenswrapper[4857]: I1128 13:39:05.325254 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-ph2cf"] Nov 28 13:39:05 crc kubenswrapper[4857]: I1128 13:39:05.413354 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"e2fec95b-4e40-4761-9d14-6abfeb78d9c0","Type":"ContainerStarted","Data":"457de2cc0d2a22e0e6bf0896f8d33c2d72e60d0f25a91797147a73b2413af013"} Nov 28 13:39:05 crc kubenswrapper[4857]: I1128 13:39:05.415155 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-ph2cf" event={"ID":"c80a8609-29af-4833-856c-ee4094abcc0c","Type":"ContainerStarted","Data":"990556a080bb034b9f42137b7735b852486767ddf9a79c21d5d0d82ecc2e190d"} Nov 28 13:39:05 crc kubenswrapper[4857]: I1128 13:39:05.416955 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-t99ql" event={"ID":"b1f7e362-6e6b-4636-b551-4533ad037811","Type":"ContainerStarted","Data":"caad3084e358c3068d0b0f33e081298a4dc76f14aee40ff63720ff962289097d"} Nov 28 13:39:05 crc kubenswrapper[4857]: E1128 13:39:05.417638 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-drj4p" podUID="861147df-48f1-4c7d-b05e-a3eddec517e1" Nov 28 13:39:05 crc kubenswrapper[4857]: E1128 13:39:05.418147 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0\\\"\"" pod="openstack/kube-state-metrics-0" podUID="db66469a-ca4a-4f4b-b657-70bf41cd45db" Nov 28 13:39:05 crc kubenswrapper[4857]: I1128 13:39:05.729443 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-hr8rg" Nov 28 13:39:05 crc kubenswrapper[4857]: I1128 13:39:05.813655 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-cn2c5" Nov 28 13:39:05 crc kubenswrapper[4857]: I1128 13:39:05.888719 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d865a2dc-946b-4a9c-814f-3406432d5a4e-config\") pod \"d865a2dc-946b-4a9c-814f-3406432d5a4e\" (UID: \"d865a2dc-946b-4a9c-814f-3406432d5a4e\") " Nov 28 13:39:05 crc kubenswrapper[4857]: I1128 13:39:05.888797 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-52w2j\" (UniqueName: \"kubernetes.io/projected/d865a2dc-946b-4a9c-814f-3406432d5a4e-kube-api-access-52w2j\") pod \"d865a2dc-946b-4a9c-814f-3406432d5a4e\" (UID: \"d865a2dc-946b-4a9c-814f-3406432d5a4e\") " Nov 28 13:39:05 crc kubenswrapper[4857]: I1128 13:39:05.889289 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d865a2dc-946b-4a9c-814f-3406432d5a4e-config" (OuterVolumeSpecName: "config") pod "d865a2dc-946b-4a9c-814f-3406432d5a4e" (UID: "d865a2dc-946b-4a9c-814f-3406432d5a4e"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:39:05 crc kubenswrapper[4857]: I1128 13:39:05.912002 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d865a2dc-946b-4a9c-814f-3406432d5a4e-kube-api-access-52w2j" (OuterVolumeSpecName: "kube-api-access-52w2j") pod "d865a2dc-946b-4a9c-814f-3406432d5a4e" (UID: "d865a2dc-946b-4a9c-814f-3406432d5a4e"). InnerVolumeSpecName "kube-api-access-52w2j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:39:05 crc kubenswrapper[4857]: I1128 13:39:05.929799 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 13:39:05 crc kubenswrapper[4857]: I1128 13:39:05.990531 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1de1ce93-7a6d-4d58-90cd-03d94d4a1a32-dns-svc\") pod \"1de1ce93-7a6d-4d58-90cd-03d94d4a1a32\" (UID: \"1de1ce93-7a6d-4d58-90cd-03d94d4a1a32\") " Nov 28 13:39:05 crc kubenswrapper[4857]: I1128 13:39:05.990610 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1de1ce93-7a6d-4d58-90cd-03d94d4a1a32-config\") pod \"1de1ce93-7a6d-4d58-90cd-03d94d4a1a32\" (UID: \"1de1ce93-7a6d-4d58-90cd-03d94d4a1a32\") " Nov 28 13:39:05 crc kubenswrapper[4857]: I1128 13:39:05.990723 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2cds5\" (UniqueName: \"kubernetes.io/projected/1de1ce93-7a6d-4d58-90cd-03d94d4a1a32-kube-api-access-2cds5\") pod \"1de1ce93-7a6d-4d58-90cd-03d94d4a1a32\" (UID: \"1de1ce93-7a6d-4d58-90cd-03d94d4a1a32\") " Nov 28 13:39:05 crc kubenswrapper[4857]: I1128 13:39:05.991119 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d865a2dc-946b-4a9c-814f-3406432d5a4e-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:05 crc kubenswrapper[4857]: I1128 13:39:05.991143 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-52w2j\" (UniqueName: \"kubernetes.io/projected/d865a2dc-946b-4a9c-814f-3406432d5a4e-kube-api-access-52w2j\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:05 crc kubenswrapper[4857]: I1128 13:39:05.991189 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1de1ce93-7a6d-4d58-90cd-03d94d4a1a32-config" (OuterVolumeSpecName: "config") pod "1de1ce93-7a6d-4d58-90cd-03d94d4a1a32" (UID: "1de1ce93-7a6d-4d58-90cd-03d94d4a1a32"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:39:05 crc kubenswrapper[4857]: I1128 13:39:05.991200 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1de1ce93-7a6d-4d58-90cd-03d94d4a1a32-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1de1ce93-7a6d-4d58-90cd-03d94d4a1a32" (UID: "1de1ce93-7a6d-4d58-90cd-03d94d4a1a32"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:39:06 crc kubenswrapper[4857]: I1128 13:39:06.054816 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1de1ce93-7a6d-4d58-90cd-03d94d4a1a32-kube-api-access-2cds5" (OuterVolumeSpecName: "kube-api-access-2cds5") pod "1de1ce93-7a6d-4d58-90cd-03d94d4a1a32" (UID: "1de1ce93-7a6d-4d58-90cd-03d94d4a1a32"). InnerVolumeSpecName "kube-api-access-2cds5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:39:06 crc kubenswrapper[4857]: I1128 13:39:06.093469 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1de1ce93-7a6d-4d58-90cd-03d94d4a1a32-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:06 crc kubenswrapper[4857]: I1128 13:39:06.093554 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1de1ce93-7a6d-4d58-90cd-03d94d4a1a32-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:06 crc kubenswrapper[4857]: I1128 13:39:06.093569 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2cds5\" (UniqueName: \"kubernetes.io/projected/1de1ce93-7a6d-4d58-90cd-03d94d4a1a32-kube-api-access-2cds5\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:06 crc kubenswrapper[4857]: I1128 13:39:06.441932 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-hr8rg" event={"ID":"d865a2dc-946b-4a9c-814f-3406432d5a4e","Type":"ContainerDied","Data":"19710d2efdec782295f35f4309e52bee137eafd7d9f52cab67080553ef2d9162"} Nov 28 13:39:06 crc kubenswrapper[4857]: I1128 13:39:06.441948 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-hr8rg" Nov 28 13:39:06 crc kubenswrapper[4857]: I1128 13:39:06.443585 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"71cc1f00-1a63-428e-8f12-2136ab077860","Type":"ContainerStarted","Data":"da003013615b1f7d03fb067beb76ca6840f95de6e79bbeb6ebc074ff574b4949"} Nov 28 13:39:06 crc kubenswrapper[4857]: I1128 13:39:06.445886 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"6133e02f-8ece-4b6b-ac4a-c3871e017c1e","Type":"ContainerStarted","Data":"69d3044fe2e1fa6c916ac564e064f861f8402dc3a6ca1d761f53522ebfd67093"} Nov 28 13:39:06 crc kubenswrapper[4857]: I1128 13:39:06.447987 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-cn2c5" event={"ID":"1de1ce93-7a6d-4d58-90cd-03d94d4a1a32","Type":"ContainerDied","Data":"1591c6d7df081583c97677facf438ae0ebb965ff44314a1f5003d13f8afd53e4"} Nov 28 13:39:06 crc kubenswrapper[4857]: I1128 13:39:06.448075 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-cn2c5" Nov 28 13:39:06 crc kubenswrapper[4857]: I1128 13:39:06.502091 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-hr8rg"] Nov 28 13:39:06 crc kubenswrapper[4857]: I1128 13:39:06.517796 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-hr8rg"] Nov 28 13:39:06 crc kubenswrapper[4857]: I1128 13:39:06.539255 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-cn2c5"] Nov 28 13:39:06 crc kubenswrapper[4857]: I1128 13:39:06.539306 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-cn2c5"] Nov 28 13:39:08 crc kubenswrapper[4857]: I1128 13:39:08.322163 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1de1ce93-7a6d-4d58-90cd-03d94d4a1a32" path="/var/lib/kubelet/pods/1de1ce93-7a6d-4d58-90cd-03d94d4a1a32/volumes" Nov 28 13:39:08 crc kubenswrapper[4857]: I1128 13:39:08.323236 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d865a2dc-946b-4a9c-814f-3406432d5a4e" path="/var/lib/kubelet/pods/d865a2dc-946b-4a9c-814f-3406432d5a4e/volumes" Nov 28 13:39:08 crc kubenswrapper[4857]: I1128 13:39:08.469841 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"6133e02f-8ece-4b6b-ac4a-c3871e017c1e","Type":"ContainerStarted","Data":"7f7bc0064f521471cf62c8788f876e9c2ad9aae9c8e92b24025dd3c24bcd9aaf"} Nov 28 13:39:08 crc kubenswrapper[4857]: I1128 13:39:08.471423 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-t99ql" event={"ID":"b1f7e362-6e6b-4636-b551-4533ad037811","Type":"ContainerStarted","Data":"015bbd0a1a1e9fb405214fe7a35a6c512629833b2d306bd11d97dfd7b5021dee"} Nov 28 13:39:08 crc kubenswrapper[4857]: I1128 13:39:08.472380 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-t99ql" Nov 28 13:39:08 crc kubenswrapper[4857]: I1128 13:39:08.474548 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"e2fec95b-4e40-4761-9d14-6abfeb78d9c0","Type":"ContainerStarted","Data":"bc6f23278057204397c35369662b163cc069bd8443a701043e5b2fc7356a153a"} Nov 28 13:39:08 crc kubenswrapper[4857]: I1128 13:39:08.475952 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-ph2cf" event={"ID":"c80a8609-29af-4833-856c-ee4094abcc0c","Type":"ContainerStarted","Data":"e216cbd60b2d5cadbec0b1b5aaf62d4aa986739fbe4ec14e55bb470d382294b3"} Nov 28 13:39:08 crc kubenswrapper[4857]: I1128 13:39:08.492835 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-t99ql" podStartSLOduration=25.499286824 podStartE2EDuration="28.492807423s" podCreationTimestamp="2025-11-28 13:38:40 +0000 UTC" firstStartedPulling="2025-11-28 13:39:04.982045483 +0000 UTC m=+1237.009420650" lastFinishedPulling="2025-11-28 13:39:07.975566082 +0000 UTC m=+1240.002941249" observedRunningTime="2025-11-28 13:39:08.489699583 +0000 UTC m=+1240.517074760" watchObservedRunningTime="2025-11-28 13:39:08.492807423 +0000 UTC m=+1240.520182590" Nov 28 13:39:09 crc kubenswrapper[4857]: I1128 13:39:09.488571 4857 generic.go:334] "Generic (PLEG): container finished" podID="c80a8609-29af-4833-856c-ee4094abcc0c" containerID="e216cbd60b2d5cadbec0b1b5aaf62d4aa986739fbe4ec14e55bb470d382294b3" exitCode=0 Nov 28 13:39:09 crc kubenswrapper[4857]: I1128 13:39:09.488667 
4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-ph2cf" event={"ID":"c80a8609-29af-4833-856c-ee4094abcc0c","Type":"ContainerDied","Data":"e216cbd60b2d5cadbec0b1b5aaf62d4aa986739fbe4ec14e55bb470d382294b3"} Nov 28 13:39:10 crc kubenswrapper[4857]: I1128 13:39:10.501613 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-ph2cf" event={"ID":"c80a8609-29af-4833-856c-ee4094abcc0c","Type":"ContainerStarted","Data":"7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c"} Nov 28 13:39:11 crc kubenswrapper[4857]: I1128 13:39:11.516537 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-ph2cf" event={"ID":"c80a8609-29af-4833-856c-ee4094abcc0c","Type":"ContainerStarted","Data":"8daa77c6a75ac309fb1164e6118c9ceda262a9d1e1c41ac9229dc01914b7ee0a"} Nov 28 13:39:11 crc kubenswrapper[4857]: I1128 13:39:11.516996 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:39:11 crc kubenswrapper[4857]: I1128 13:39:11.517009 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:39:11 crc kubenswrapper[4857]: I1128 13:39:11.541003 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-ph2cf" podStartSLOduration=28.890561465 podStartE2EDuration="31.540981123s" podCreationTimestamp="2025-11-28 13:38:40 +0000 UTC" firstStartedPulling="2025-11-28 13:39:05.32639591 +0000 UTC m=+1237.353771077" lastFinishedPulling="2025-11-28 13:39:07.976815568 +0000 UTC m=+1240.004190735" observedRunningTime="2025-11-28 13:39:11.535103323 +0000 UTC m=+1243.562478490" watchObservedRunningTime="2025-11-28 13:39:11.540981123 +0000 UTC m=+1243.568356290" Nov 28 13:39:12 crc kubenswrapper[4857]: I1128 13:39:12.525989 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"e2fec95b-4e40-4761-9d14-6abfeb78d9c0","Type":"ContainerStarted","Data":"8dea0ac70575cce5895b3d523d663e080996fff826c13ed3d4f241bb7bf27649"} Nov 28 13:39:12 crc kubenswrapper[4857]: I1128 13:39:12.527765 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"30a2b522-ef43-4b0a-8215-2bb928744e00","Type":"ContainerStarted","Data":"32156962d3c5fd3e7bbc12ce4bc19050625834d7bd9f60bcb681cfb5610ca641"} Nov 28 13:39:12 crc kubenswrapper[4857]: I1128 13:39:12.527977 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 28 13:39:12 crc kubenswrapper[4857]: I1128 13:39:12.530003 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"6133e02f-8ece-4b6b-ac4a-c3871e017c1e","Type":"ContainerStarted","Data":"1412a3b0dbe8994e239691c6f96324d04036bd95c69256db95ab0b15e2c14255"} Nov 28 13:39:12 crc kubenswrapper[4857]: I1128 13:39:12.553126 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=23.213148125 podStartE2EDuration="29.553106077s" podCreationTimestamp="2025-11-28 13:38:43 +0000 UTC" firstStartedPulling="2025-11-28 13:39:05.224783709 +0000 UTC m=+1237.252158876" lastFinishedPulling="2025-11-28 13:39:11.564741661 +0000 UTC m=+1243.592116828" observedRunningTime="2025-11-28 13:39:12.550609694 +0000 UTC m=+1244.577984872" watchObservedRunningTime="2025-11-28 13:39:12.553106077 +0000 UTC m=+1244.580481244" Nov 28 13:39:12 crc 
kubenswrapper[4857]: I1128 13:39:12.587837 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=27.973973853 podStartE2EDuration="33.587815011s" podCreationTimestamp="2025-11-28 13:38:39 +0000 UTC" firstStartedPulling="2025-11-28 13:39:05.950934904 +0000 UTC m=+1237.978310091" lastFinishedPulling="2025-11-28 13:39:11.564776082 +0000 UTC m=+1243.592151249" observedRunningTime="2025-11-28 13:39:12.580345115 +0000 UTC m=+1244.607720292" watchObservedRunningTime="2025-11-28 13:39:12.587815011 +0000 UTC m=+1244.615190188" Nov 28 13:39:12 crc kubenswrapper[4857]: I1128 13:39:12.598547 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=4.457343761 podStartE2EDuration="38.598522791s" podCreationTimestamp="2025-11-28 13:38:34 +0000 UTC" firstStartedPulling="2025-11-28 13:38:35.67834832 +0000 UTC m=+1207.705723487" lastFinishedPulling="2025-11-28 13:39:09.81952735 +0000 UTC m=+1241.846902517" observedRunningTime="2025-11-28 13:39:12.595867244 +0000 UTC m=+1244.623242451" watchObservedRunningTime="2025-11-28 13:39:12.598522791 +0000 UTC m=+1244.625897958" Nov 28 13:39:13 crc kubenswrapper[4857]: I1128 13:39:13.546175 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"cfbd0457-d459-4bf2-bdaf-8b61db5cce65","Type":"ContainerStarted","Data":"0b4fdb93170b6f9968d2f6150fa31e0cec84ad5ec1c7df2d3bf7d8ff7467e6e7"} Nov 28 13:39:13 crc kubenswrapper[4857]: I1128 13:39:13.845965 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 28 13:39:13 crc kubenswrapper[4857]: I1128 13:39:13.895374 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 28 13:39:14 crc kubenswrapper[4857]: I1128 13:39:14.406926 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 28 13:39:14 crc kubenswrapper[4857]: I1128 13:39:14.406979 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 28 13:39:14 crc kubenswrapper[4857]: I1128 13:39:14.456796 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 28 13:39:14 crc kubenswrapper[4857]: I1128 13:39:14.554732 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7b0c1834-7ece-4d9c-9cf1-28a53aea280e","Type":"ContainerStarted","Data":"f3d21b615ee4666a889ddc77f9bc3da38000a4886f57b39297860f562322b2b3"} Nov 28 13:39:14 crc kubenswrapper[4857]: I1128 13:39:14.556223 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"41687469-06d7-47ab-ad25-d32df165e1e2","Type":"ContainerStarted","Data":"c3560dc43f3cda5bfbcfd827097587a99cc977e3650dcfdd935c96bc56677b06"} Nov 28 13:39:14 crc kubenswrapper[4857]: I1128 13:39:14.556718 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 28 13:39:14 crc kubenswrapper[4857]: I1128 13:39:14.595162 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 28 13:39:14 crc kubenswrapper[4857]: I1128 13:39:14.604520 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 28 13:39:14 crc kubenswrapper[4857]: I1128 13:39:14.812646 4857 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-drj4p"] Nov 28 13:39:14 crc kubenswrapper[4857]: I1128 13:39:14.862504 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-m48cv"] Nov 28 13:39:14 crc kubenswrapper[4857]: I1128 13:39:14.864164 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-m48cv" Nov 28 13:39:14 crc kubenswrapper[4857]: I1128 13:39:14.876177 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 28 13:39:14 crc kubenswrapper[4857]: I1128 13:39:14.897872 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-m48cv"] Nov 28 13:39:14 crc kubenswrapper[4857]: I1128 13:39:14.941765 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fb09eb7d-1f7b-4613-900e-13de7585edef-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-m48cv\" (UID: \"fb09eb7d-1f7b-4613-900e-13de7585edef\") " pod="openstack/dnsmasq-dns-6bc7876d45-m48cv" Nov 28 13:39:14 crc kubenswrapper[4857]: I1128 13:39:14.941823 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb09eb7d-1f7b-4613-900e-13de7585edef-config\") pod \"dnsmasq-dns-6bc7876d45-m48cv\" (UID: \"fb09eb7d-1f7b-4613-900e-13de7585edef\") " pod="openstack/dnsmasq-dns-6bc7876d45-m48cv" Nov 28 13:39:14 crc kubenswrapper[4857]: I1128 13:39:14.941928 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghtmx\" (UniqueName: \"kubernetes.io/projected/fb09eb7d-1f7b-4613-900e-13de7585edef-kube-api-access-ghtmx\") pod \"dnsmasq-dns-6bc7876d45-m48cv\" (UID: \"fb09eb7d-1f7b-4613-900e-13de7585edef\") " pod="openstack/dnsmasq-dns-6bc7876d45-m48cv" Nov 28 13:39:14 crc kubenswrapper[4857]: I1128 13:39:14.941959 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fb09eb7d-1f7b-4613-900e-13de7585edef-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-m48cv\" (UID: \"fb09eb7d-1f7b-4613-900e-13de7585edef\") " pod="openstack/dnsmasq-dns-6bc7876d45-m48cv" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.043285 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fb09eb7d-1f7b-4613-900e-13de7585edef-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-m48cv\" (UID: \"fb09eb7d-1f7b-4613-900e-13de7585edef\") " pod="openstack/dnsmasq-dns-6bc7876d45-m48cv" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.043715 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb09eb7d-1f7b-4613-900e-13de7585edef-config\") pod \"dnsmasq-dns-6bc7876d45-m48cv\" (UID: \"fb09eb7d-1f7b-4613-900e-13de7585edef\") " pod="openstack/dnsmasq-dns-6bc7876d45-m48cv" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.043967 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghtmx\" (UniqueName: \"kubernetes.io/projected/fb09eb7d-1f7b-4613-900e-13de7585edef-kube-api-access-ghtmx\") pod \"dnsmasq-dns-6bc7876d45-m48cv\" (UID: \"fb09eb7d-1f7b-4613-900e-13de7585edef\") " pod="openstack/dnsmasq-dns-6bc7876d45-m48cv" Nov 28 
13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.044103 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fb09eb7d-1f7b-4613-900e-13de7585edef-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-m48cv\" (UID: \"fb09eb7d-1f7b-4613-900e-13de7585edef\") " pod="openstack/dnsmasq-dns-6bc7876d45-m48cv" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.044826 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fb09eb7d-1f7b-4613-900e-13de7585edef-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-m48cv\" (UID: \"fb09eb7d-1f7b-4613-900e-13de7585edef\") " pod="openstack/dnsmasq-dns-6bc7876d45-m48cv" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.045081 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb09eb7d-1f7b-4613-900e-13de7585edef-config\") pod \"dnsmasq-dns-6bc7876d45-m48cv\" (UID: \"fb09eb7d-1f7b-4613-900e-13de7585edef\") " pod="openstack/dnsmasq-dns-6bc7876d45-m48cv" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.045312 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fb09eb7d-1f7b-4613-900e-13de7585edef-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-m48cv\" (UID: \"fb09eb7d-1f7b-4613-900e-13de7585edef\") " pod="openstack/dnsmasq-dns-6bc7876d45-m48cv" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.063204 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-k7b77"] Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.064268 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-k7b77" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.067770 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.075599 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghtmx\" (UniqueName: \"kubernetes.io/projected/fb09eb7d-1f7b-4613-900e-13de7585edef-kube-api-access-ghtmx\") pod \"dnsmasq-dns-6bc7876d45-m48cv\" (UID: \"fb09eb7d-1f7b-4613-900e-13de7585edef\") " pod="openstack/dnsmasq-dns-6bc7876d45-m48cv" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.083910 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-k7b77"] Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.147745 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zkdm\" (UniqueName: \"kubernetes.io/projected/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-kube-api-access-9zkdm\") pod \"ovn-controller-metrics-k7b77\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") " pod="openstack/ovn-controller-metrics-k7b77" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.147858 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-ovs-rundir\") pod \"ovn-controller-metrics-k7b77\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") " pod="openstack/ovn-controller-metrics-k7b77" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.147949 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-ovn-rundir\") pod \"ovn-controller-metrics-k7b77\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") " pod="openstack/ovn-controller-metrics-k7b77" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.148030 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-config\") pod \"ovn-controller-metrics-k7b77\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") " pod="openstack/ovn-controller-metrics-k7b77" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.148103 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-combined-ca-bundle\") pod \"ovn-controller-metrics-k7b77\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") " pod="openstack/ovn-controller-metrics-k7b77" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.148149 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-k7b77\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") " pod="openstack/ovn-controller-metrics-k7b77" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.174973 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.176423 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.186632 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-m48cv" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.187050 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.187054 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.187225 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.187316 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-ckktw" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.191069 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-zl8dt"] Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.230729 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.249200 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-combined-ca-bundle\") pod \"ovn-controller-metrics-k7b77\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") " pod="openstack/ovn-controller-metrics-k7b77" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.249244 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-k7b77\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") " pod="openstack/ovn-controller-metrics-k7b77" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.249267 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/076d849e-fd88-4add-a5f9-e45a1983a606-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " pod="openstack/ovn-northd-0" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.249287 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/076d849e-fd88-4add-a5f9-e45a1983a606-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " pod="openstack/ovn-northd-0" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.249338 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/076d849e-fd88-4add-a5f9-e45a1983a606-scripts\") pod \"ovn-northd-0\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " pod="openstack/ovn-northd-0" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.249361 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/076d849e-fd88-4add-a5f9-e45a1983a606-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " pod="openstack/ovn-northd-0" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.249395 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s7d42\" (UniqueName: 
\"kubernetes.io/projected/076d849e-fd88-4add-a5f9-e45a1983a606-kube-api-access-s7d42\") pod \"ovn-northd-0\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " pod="openstack/ovn-northd-0" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.249419 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/076d849e-fd88-4add-a5f9-e45a1983a606-config\") pod \"ovn-northd-0\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " pod="openstack/ovn-northd-0" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.249446 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zkdm\" (UniqueName: \"kubernetes.io/projected/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-kube-api-access-9zkdm\") pod \"ovn-controller-metrics-k7b77\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") " pod="openstack/ovn-controller-metrics-k7b77" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.249465 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/076d849e-fd88-4add-a5f9-e45a1983a606-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " pod="openstack/ovn-northd-0" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.249498 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-ovs-rundir\") pod \"ovn-controller-metrics-k7b77\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") " pod="openstack/ovn-controller-metrics-k7b77" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.249520 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-ovn-rundir\") pod \"ovn-controller-metrics-k7b77\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") " pod="openstack/ovn-controller-metrics-k7b77" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.249549 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-config\") pod \"ovn-controller-metrics-k7b77\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") " pod="openstack/ovn-controller-metrics-k7b77" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.250382 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-config\") pod \"ovn-controller-metrics-k7b77\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") " pod="openstack/ovn-controller-metrics-k7b77" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.251730 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-ovs-rundir\") pod \"ovn-controller-metrics-k7b77\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") " pod="openstack/ovn-controller-metrics-k7b77" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.251842 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-ovn-rundir\") pod \"ovn-controller-metrics-k7b77\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") " 
pod="openstack/ovn-controller-metrics-k7b77" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.262238 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-combined-ca-bundle\") pod \"ovn-controller-metrics-k7b77\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") " pod="openstack/ovn-controller-metrics-k7b77" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.273872 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-m8rq9"] Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.275346 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-m8rq9" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.275838 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zkdm\" (UniqueName: \"kubernetes.io/projected/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-kube-api-access-9zkdm\") pod \"ovn-controller-metrics-k7b77\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") " pod="openstack/ovn-controller-metrics-k7b77" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.277566 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.299517 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-m8rq9"] Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.318656 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-k7b77\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") " pod="openstack/ovn-controller-metrics-k7b77" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.352964 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-m8rq9\" (UID: \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\") " pod="openstack/dnsmasq-dns-8554648995-m8rq9" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.353032 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s7d42\" (UniqueName: \"kubernetes.io/projected/076d849e-fd88-4add-a5f9-e45a1983a606-kube-api-access-s7d42\") pod \"ovn-northd-0\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " pod="openstack/ovn-northd-0" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.353069 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/076d849e-fd88-4add-a5f9-e45a1983a606-config\") pod \"ovn-northd-0\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " pod="openstack/ovn-northd-0" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.353105 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/076d849e-fd88-4add-a5f9-e45a1983a606-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " pod="openstack/ovn-northd-0" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.353154 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-9fzmj\" (UniqueName: \"kubernetes.io/projected/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-kube-api-access-9fzmj\") pod \"dnsmasq-dns-8554648995-m8rq9\" (UID: \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\") " pod="openstack/dnsmasq-dns-8554648995-m8rq9" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.353187 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-dns-svc\") pod \"dnsmasq-dns-8554648995-m8rq9\" (UID: \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\") " pod="openstack/dnsmasq-dns-8554648995-m8rq9" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.353220 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-config\") pod \"dnsmasq-dns-8554648995-m8rq9\" (UID: \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\") " pod="openstack/dnsmasq-dns-8554648995-m8rq9" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.353259 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-m8rq9\" (UID: \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\") " pod="openstack/dnsmasq-dns-8554648995-m8rq9" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.353281 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/076d849e-fd88-4add-a5f9-e45a1983a606-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " pod="openstack/ovn-northd-0" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.353300 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/076d849e-fd88-4add-a5f9-e45a1983a606-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " pod="openstack/ovn-northd-0" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.353343 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/076d849e-fd88-4add-a5f9-e45a1983a606-scripts\") pod \"ovn-northd-0\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " pod="openstack/ovn-northd-0" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.353360 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/076d849e-fd88-4add-a5f9-e45a1983a606-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " pod="openstack/ovn-northd-0" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.355728 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/076d849e-fd88-4add-a5f9-e45a1983a606-config\") pod \"ovn-northd-0\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " pod="openstack/ovn-northd-0" Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.356587 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/076d849e-fd88-4add-a5f9-e45a1983a606-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " pod="openstack/ovn-northd-0" Nov 28 
13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.360685 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/076d849e-fd88-4add-a5f9-e45a1983a606-scripts\") pod \"ovn-northd-0\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " pod="openstack/ovn-northd-0"
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.364065 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/076d849e-fd88-4add-a5f9-e45a1983a606-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " pod="openstack/ovn-northd-0"
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.372112 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s7d42\" (UniqueName: \"kubernetes.io/projected/076d849e-fd88-4add-a5f9-e45a1983a606-kube-api-access-s7d42\") pod \"ovn-northd-0\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " pod="openstack/ovn-northd-0"
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.386821 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/076d849e-fd88-4add-a5f9-e45a1983a606-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " pod="openstack/ovn-northd-0"
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.391652 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/076d849e-fd88-4add-a5f9-e45a1983a606-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " pod="openstack/ovn-northd-0"
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.405822 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-k7b77"
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.410549 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-drj4p"
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.464199 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/861147df-48f1-4c7d-b05e-a3eddec517e1-config\") pod \"861147df-48f1-4c7d-b05e-a3eddec517e1\" (UID: \"861147df-48f1-4c7d-b05e-a3eddec517e1\") "
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.464484 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fx64m\" (UniqueName: \"kubernetes.io/projected/861147df-48f1-4c7d-b05e-a3eddec517e1-kube-api-access-fx64m\") pod \"861147df-48f1-4c7d-b05e-a3eddec517e1\" (UID: \"861147df-48f1-4c7d-b05e-a3eddec517e1\") "
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.464544 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/861147df-48f1-4c7d-b05e-a3eddec517e1-dns-svc\") pod \"861147df-48f1-4c7d-b05e-a3eddec517e1\" (UID: \"861147df-48f1-4c7d-b05e-a3eddec517e1\") "
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.464544 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/861147df-48f1-4c7d-b05e-a3eddec517e1-config" (OuterVolumeSpecName: "config") pod "861147df-48f1-4c7d-b05e-a3eddec517e1" (UID: "861147df-48f1-4c7d-b05e-a3eddec517e1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.466139 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/861147df-48f1-4c7d-b05e-a3eddec517e1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "861147df-48f1-4c7d-b05e-a3eddec517e1" (UID: "861147df-48f1-4c7d-b05e-a3eddec517e1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.474093 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fzmj\" (UniqueName: \"kubernetes.io/projected/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-kube-api-access-9fzmj\") pod \"dnsmasq-dns-8554648995-m8rq9\" (UID: \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\") " pod="openstack/dnsmasq-dns-8554648995-m8rq9"
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.474295 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-dns-svc\") pod \"dnsmasq-dns-8554648995-m8rq9\" (UID: \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\") " pod="openstack/dnsmasq-dns-8554648995-m8rq9"
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.475899 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-config\") pod \"dnsmasq-dns-8554648995-m8rq9\" (UID: \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\") " pod="openstack/dnsmasq-dns-8554648995-m8rq9"
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.476030 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-m8rq9\" (UID: \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\") " pod="openstack/dnsmasq-dns-8554648995-m8rq9"
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.476167 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-m8rq9\" (UID: \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\") " pod="openstack/dnsmasq-dns-8554648995-m8rq9"
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.476291 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/861147df-48f1-4c7d-b05e-a3eddec517e1-config\") on node \"crc\" DevicePath \"\""
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.476302 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/861147df-48f1-4c7d-b05e-a3eddec517e1-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.477026 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-dns-svc\") pod \"dnsmasq-dns-8554648995-m8rq9\" (UID: \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\") " pod="openstack/dnsmasq-dns-8554648995-m8rq9"
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.477310 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-m8rq9\" (UID: \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\") " pod="openstack/dnsmasq-dns-8554648995-m8rq9"
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.478175 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-m8rq9\" (UID: \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\") " pod="openstack/dnsmasq-dns-8554648995-m8rq9"
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.479120 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/861147df-48f1-4c7d-b05e-a3eddec517e1-kube-api-access-fx64m" (OuterVolumeSpecName: "kube-api-access-fx64m") pod "861147df-48f1-4c7d-b05e-a3eddec517e1" (UID: "861147df-48f1-4c7d-b05e-a3eddec517e1"). InnerVolumeSpecName "kube-api-access-fx64m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.493197 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-config\") pod \"dnsmasq-dns-8554648995-m8rq9\" (UID: \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\") " pod="openstack/dnsmasq-dns-8554648995-m8rq9"
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.504745 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fzmj\" (UniqueName: \"kubernetes.io/projected/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-kube-api-access-9fzmj\") pod \"dnsmasq-dns-8554648995-m8rq9\" (UID: \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\") " pod="openstack/dnsmasq-dns-8554648995-m8rq9"
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.554707 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-zl8dt"
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.561347 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.565242 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-zl8dt" event={"ID":"5fbb4ae3-ae41-4be2-8f32-862531ae0737","Type":"ContainerDied","Data":"7964d472251e0feeb17fb531c0cdc66538389bff12f12aaf5912c99ce7f0bbf2"}
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.565347 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-zl8dt"
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.572782 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-drj4p"
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.574847 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-drj4p" event={"ID":"861147df-48f1-4c7d-b05e-a3eddec517e1","Type":"ContainerDied","Data":"9c246fb058f6394a09a2e3ebb1c818bb3dff31325fb600b2d99c8c2461682206"}
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.578829 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fx64m\" (UniqueName: \"kubernetes.io/projected/861147df-48f1-4c7d-b05e-a3eddec517e1-kube-api-access-fx64m\") on node \"crc\" DevicePath \"\""
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.651892 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-drj4p"]
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.655521 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-drj4p"]
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.679826 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hwmgj\" (UniqueName: \"kubernetes.io/projected/5fbb4ae3-ae41-4be2-8f32-862531ae0737-kube-api-access-hwmgj\") pod \"5fbb4ae3-ae41-4be2-8f32-862531ae0737\" (UID: \"5fbb4ae3-ae41-4be2-8f32-862531ae0737\") "
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.679876 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5fbb4ae3-ae41-4be2-8f32-862531ae0737-config\") pod \"5fbb4ae3-ae41-4be2-8f32-862531ae0737\" (UID: \"5fbb4ae3-ae41-4be2-8f32-862531ae0737\") "
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.679955 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5fbb4ae3-ae41-4be2-8f32-862531ae0737-dns-svc\") pod \"5fbb4ae3-ae41-4be2-8f32-862531ae0737\" (UID: \"5fbb4ae3-ae41-4be2-8f32-862531ae0737\") "
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.685512 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5fbb4ae3-ae41-4be2-8f32-862531ae0737-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5fbb4ae3-ae41-4be2-8f32-862531ae0737" (UID: "5fbb4ae3-ae41-4be2-8f32-862531ae0737"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.686217 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5fbb4ae3-ae41-4be2-8f32-862531ae0737-config" (OuterVolumeSpecName: "config") pod "5fbb4ae3-ae41-4be2-8f32-862531ae0737" (UID: "5fbb4ae3-ae41-4be2-8f32-862531ae0737"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.691418 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fbb4ae3-ae41-4be2-8f32-862531ae0737-kube-api-access-hwmgj" (OuterVolumeSpecName: "kube-api-access-hwmgj") pod "5fbb4ae3-ae41-4be2-8f32-862531ae0737" (UID: "5fbb4ae3-ae41-4be2-8f32-862531ae0737"). InnerVolumeSpecName "kube-api-access-hwmgj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.721505 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-m8rq9"
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.781289 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hwmgj\" (UniqueName: \"kubernetes.io/projected/5fbb4ae3-ae41-4be2-8f32-862531ae0737-kube-api-access-hwmgj\") on node \"crc\" DevicePath \"\""
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.781318 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5fbb4ae3-ae41-4be2-8f32-862531ae0737-config\") on node \"crc\" DevicePath \"\""
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.781327 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5fbb4ae3-ae41-4be2-8f32-862531ae0737-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.839988 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-m48cv"]
Nov 28 13:39:15 crc kubenswrapper[4857]: W1128 13:39:15.841467 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfb09eb7d_1f7b_4613_900e_13de7585edef.slice/crio-e71e86fe485d8efaee1ab2ba81a196f40ca4cfecb3f510f19918545fa4d9b61a WatchSource:0}: Error finding container e71e86fe485d8efaee1ab2ba81a196f40ca4cfecb3f510f19918545fa4d9b61a: Status 404 returned error can't find the container with id e71e86fe485d8efaee1ab2ba81a196f40ca4cfecb3f510f19918545fa4d9b61a
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.930414 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-zl8dt"]
Nov 28 13:39:15 crc kubenswrapper[4857]: I1128 13:39:15.937718 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-zl8dt"]
Nov 28 13:39:16 crc kubenswrapper[4857]: W1128 13:39:16.021673 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8bb8cc13_eda7_4c41_9878_77ddabd55f4b.slice/crio-ca0c60f161c0f6c7d1f1e262afdaa8650aa63163101e99fdf662fb1788ac9c9b WatchSource:0}: Error finding container ca0c60f161c0f6c7d1f1e262afdaa8650aa63163101e99fdf662fb1788ac9c9b: Status 404 returned error can't find the container with id ca0c60f161c0f6c7d1f1e262afdaa8650aa63163101e99fdf662fb1788ac9c9b
Nov 28 13:39:16 crc kubenswrapper[4857]: I1128 13:39:16.024702 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-k7b77"]
Nov 28 13:39:16 crc kubenswrapper[4857]: W1128 13:39:16.107207 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod076d849e_fd88_4add_a5f9_e45a1983a606.slice/crio-edc48bdbd461607ee0559ddb2eb3267fcf1ec84a5868cd1943eee6b28324623c WatchSource:0}: Error finding container edc48bdbd461607ee0559ddb2eb3267fcf1ec84a5868cd1943eee6b28324623c: Status 404 returned error can't find the container with id edc48bdbd461607ee0559ddb2eb3267fcf1ec84a5868cd1943eee6b28324623c
Nov 28 13:39:16 crc kubenswrapper[4857]: I1128 13:39:16.107350 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Nov 28 13:39:16 crc kubenswrapper[4857]: W1128 13:39:16.224806 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeb8b0164_6add_4d4d_81d6_2a9e5814bbc4.slice/crio-17608b859ba8f5bf5b77c14be1cac8000c8e465619db7f2829c25aa8ebf3b6b1 WatchSource:0}: Error finding container 17608b859ba8f5bf5b77c14be1cac8000c8e465619db7f2829c25aa8ebf3b6b1: Status 404 returned error can't find the container with id 17608b859ba8f5bf5b77c14be1cac8000c8e465619db7f2829c25aa8ebf3b6b1
Nov 28 13:39:16 crc kubenswrapper[4857]: I1128 13:39:16.225604 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-m8rq9"]
Nov 28 13:39:16 crc kubenswrapper[4857]: I1128 13:39:16.319931 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fbb4ae3-ae41-4be2-8f32-862531ae0737" path="/var/lib/kubelet/pods/5fbb4ae3-ae41-4be2-8f32-862531ae0737/volumes"
Nov 28 13:39:16 crc kubenswrapper[4857]: I1128 13:39:16.320392 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="861147df-48f1-4c7d-b05e-a3eddec517e1" path="/var/lib/kubelet/pods/861147df-48f1-4c7d-b05e-a3eddec517e1/volumes"
Nov 28 13:39:16 crc kubenswrapper[4857]: I1128 13:39:16.582996 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-m48cv" event={"ID":"fb09eb7d-1f7b-4613-900e-13de7585edef","Type":"ContainerStarted","Data":"e71e86fe485d8efaee1ab2ba81a196f40ca4cfecb3f510f19918545fa4d9b61a"}
Nov 28 13:39:16 crc kubenswrapper[4857]: I1128 13:39:16.584364 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-m8rq9" event={"ID":"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4","Type":"ContainerStarted","Data":"17608b859ba8f5bf5b77c14be1cac8000c8e465619db7f2829c25aa8ebf3b6b1"}
Nov 28 13:39:16 crc kubenswrapper[4857]: I1128 13:39:16.586299 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-k7b77" event={"ID":"8bb8cc13-eda7-4c41-9878-77ddabd55f4b","Type":"ContainerStarted","Data":"c9dfde4fc40c233928885d1fb977721a4b1687b44081115fcf5a00db9ce6907f"}
Nov 28 13:39:16 crc kubenswrapper[4857]: I1128 13:39:16.586322 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-k7b77" event={"ID":"8bb8cc13-eda7-4c41-9878-77ddabd55f4b","Type":"ContainerStarted","Data":"ca0c60f161c0f6c7d1f1e262afdaa8650aa63163101e99fdf662fb1788ac9c9b"}
Nov 28 13:39:16 crc kubenswrapper[4857]: I1128 13:39:16.589202 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"076d849e-fd88-4add-a5f9-e45a1983a606","Type":"ContainerStarted","Data":"edc48bdbd461607ee0559ddb2eb3267fcf1ec84a5868cd1943eee6b28324623c"}
Nov 28 13:39:16 crc kubenswrapper[4857]: I1128 13:39:16.610394 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-k7b77" podStartSLOduration=1.610373022 podStartE2EDuration="1.610373022s" podCreationTimestamp="2025-11-28 13:39:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:39:16.607707115 +0000 UTC m=+1248.635082372" watchObservedRunningTime="2025-11-28 13:39:16.610373022 +0000 UTC m=+1248.637748189"
Nov 28 13:39:17 crc kubenswrapper[4857]: I1128 13:39:17.598326 4857 generic.go:334] "Generic (PLEG): container finished" podID="eb8b0164-6add-4d4d-81d6-2a9e5814bbc4" containerID="ebb0c77c3a132b40ddb14f7116fa5c94fbff77ab6d399bfdfdd415f39ad76508" exitCode=0
Nov 28 13:39:17 crc kubenswrapper[4857]: I1128 13:39:17.598392 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-m8rq9" event={"ID":"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4","Type":"ContainerDied","Data":"ebb0c77c3a132b40ddb14f7116fa5c94fbff77ab6d399bfdfdd415f39ad76508"}
Nov 28 13:39:17 crc kubenswrapper[4857]: I1128 13:39:17.603648 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"076d849e-fd88-4add-a5f9-e45a1983a606","Type":"ContainerStarted","Data":"0d984f57d2f9c989b335dc40eddb0295b7c07a2ff3153367f9c77e845c49ab2d"}
Nov 28 13:39:17 crc kubenswrapper[4857]: I1128 13:39:17.603705 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"076d849e-fd88-4add-a5f9-e45a1983a606","Type":"ContainerStarted","Data":"d8bcc929d2d6bab5c64bef5dbfc2e3fbdfbb52f64ab6c01fd92f824932851397"}
Nov 28 13:39:17 crc kubenswrapper[4857]: I1128 13:39:17.603862 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0"
Nov 28 13:39:17 crc kubenswrapper[4857]: I1128 13:39:17.604025 4857 generic.go:334] "Generic (PLEG): container finished" podID="fb09eb7d-1f7b-4613-900e-13de7585edef" containerID="ed79b812c0c207711b11a01129dcb5f4901f87d22a6ee2ae72224edcb83ad121" exitCode=0
Nov 28 13:39:17 crc kubenswrapper[4857]: I1128 13:39:17.604287 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-m48cv" event={"ID":"fb09eb7d-1f7b-4613-900e-13de7585edef","Type":"ContainerDied","Data":"ed79b812c0c207711b11a01129dcb5f4901f87d22a6ee2ae72224edcb83ad121"}
Nov 28 13:39:17 crc kubenswrapper[4857]: I1128 13:39:17.645284 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=1.6512799070000002 podStartE2EDuration="2.645263054s" podCreationTimestamp="2025-11-28 13:39:15 +0000 UTC" firstStartedPulling="2025-11-28 13:39:16.111063392 +0000 UTC m=+1248.138438569" lastFinishedPulling="2025-11-28 13:39:17.105046549 +0000 UTC m=+1249.132421716" observedRunningTime="2025-11-28 13:39:17.639401114 +0000 UTC m=+1249.666776271" watchObservedRunningTime="2025-11-28 13:39:17.645263054 +0000 UTC m=+1249.672638221"
Nov 28 13:39:18 crc kubenswrapper[4857]: I1128 13:39:18.633351 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-m48cv" event={"ID":"fb09eb7d-1f7b-4613-900e-13de7585edef","Type":"ContainerStarted","Data":"4c8ef78e5ebd9746257e5142ab650ba5f7fcf502cfa3c4479926e6293dabf8f4"}
Nov 28 13:39:18 crc kubenswrapper[4857]: I1128 13:39:18.633931 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bc7876d45-m48cv"
Nov 28 13:39:18 crc kubenswrapper[4857]: I1128 13:39:18.636670 4857 generic.go:334] "Generic (PLEG): container finished" podID="7b0c1834-7ece-4d9c-9cf1-28a53aea280e" containerID="f3d21b615ee4666a889ddc77f9bc3da38000a4886f57b39297860f562322b2b3" exitCode=0
Nov 28 13:39:18 crc kubenswrapper[4857]: I1128 13:39:18.636822 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7b0c1834-7ece-4d9c-9cf1-28a53aea280e","Type":"ContainerDied","Data":"f3d21b615ee4666a889ddc77f9bc3da38000a4886f57b39297860f562322b2b3"}
Nov 28 13:39:18 crc kubenswrapper[4857]: I1128 13:39:18.639782 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-m8rq9" event={"ID":"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4","Type":"ContainerStarted","Data":"f5da92f08802942713f8bc36dd148ab39e7986210811f457a325c152ded87c9b"}
Nov 28 13:39:18 crc kubenswrapper[4857]: I1128 13:39:18.640519 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8554648995-m8rq9"
Nov 28 13:39:18 crc kubenswrapper[4857]: I1128 13:39:18.644872 4857 generic.go:334] "Generic (PLEG): container finished" podID="41687469-06d7-47ab-ad25-d32df165e1e2" containerID="c3560dc43f3cda5bfbcfd827097587a99cc977e3650dcfdd935c96bc56677b06" exitCode=0
Nov 28 13:39:18 crc kubenswrapper[4857]: I1128 13:39:18.645738 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"41687469-06d7-47ab-ad25-d32df165e1e2","Type":"ContainerDied","Data":"c3560dc43f3cda5bfbcfd827097587a99cc977e3650dcfdd935c96bc56677b06"}
Nov 28 13:39:18 crc kubenswrapper[4857]: I1128 13:39:18.664952 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bc7876d45-m48cv" podStartSLOduration=4.020479454 podStartE2EDuration="4.664920255s" podCreationTimestamp="2025-11-28 13:39:14 +0000 UTC" firstStartedPulling="2025-11-28 13:39:15.84462465 +0000 UTC m=+1247.871999817" lastFinishedPulling="2025-11-28 13:39:16.489065451 +0000 UTC m=+1248.516440618" observedRunningTime="2025-11-28 13:39:18.656032808 +0000 UTC m=+1250.683407985" watchObservedRunningTime="2025-11-28 13:39:18.664920255 +0000 UTC m=+1250.692295432"
Nov 28 13:39:18 crc kubenswrapper[4857]: I1128 13:39:18.742714 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8554648995-m8rq9" podStartSLOduration=3.187579121 podStartE2EDuration="3.742698826s" podCreationTimestamp="2025-11-28 13:39:15 +0000 UTC" firstStartedPulling="2025-11-28 13:39:16.227017938 +0000 UTC m=+1248.254393105" lastFinishedPulling="2025-11-28 13:39:16.782137633 +0000 UTC m=+1248.809512810" observedRunningTime="2025-11-28 13:39:18.738565937 +0000 UTC m=+1250.765941104" watchObservedRunningTime="2025-11-28 13:39:18.742698826 +0000 UTC m=+1250.770073993"
Nov 28 13:39:19 crc kubenswrapper[4857]: I1128 13:39:19.658375 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7b0c1834-7ece-4d9c-9cf1-28a53aea280e","Type":"ContainerStarted","Data":"1d33095c862babd8cc0dde10c6cb07ce5a2d5f837c975bd8aca7a7cf1568e774"}
Nov 28 13:39:19 crc kubenswrapper[4857]: I1128 13:39:19.662901 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"41687469-06d7-47ab-ad25-d32df165e1e2","Type":"ContainerStarted","Data":"9edd8ca732119343257da06c9d3c8090ac7032d415e0af5cc821df9c9c20bf76"}
Nov 28 13:39:19 crc kubenswrapper[4857]: I1128 13:39:19.695141 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=-9223371989.159657 podStartE2EDuration="47.695120192s" podCreationTimestamp="2025-11-28 13:38:32 +0000 UTC" firstStartedPulling="2025-11-28 13:38:34.775860317 +0000 UTC m=+1206.803235484" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:39:19.692430894 +0000 UTC m=+1251.719806061" watchObservedRunningTime="2025-11-28 13:39:19.695120192 +0000 UTC m=+1251.722495359"
Nov 28 13:39:19 crc kubenswrapper[4857]: I1128 13:39:19.721196 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=8.067080316 podStartE2EDuration="46.721173676s" podCreationTimestamp="2025-11-28 13:38:33 +0000 UTC" firstStartedPulling="2025-11-28 13:38:35.538505646 +0000 UTC m=+1207.565880813" lastFinishedPulling="2025-11-28 13:39:14.192599006 +0000 UTC m=+1246.219974173" observedRunningTime="2025-11-28 13:39:19.715458231 +0000 UTC m=+1251.742833388" watchObservedRunningTime="2025-11-28 13:39:19.721173676 +0000 UTC m=+1251.748548843"
Nov 28 13:39:20 crc kubenswrapper[4857]: I1128 13:39:20.056570 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0"
Nov 28 13:39:23 crc kubenswrapper[4857]: I1128 13:39:23.699267 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"db66469a-ca4a-4f4b-b657-70bf41cd45db","Type":"ContainerStarted","Data":"df790fb9ecc64103ecf13f34ef5d893a9319a46e50680cc017a6bb17fda529ce"}
Nov 28 13:39:23 crc kubenswrapper[4857]: I1128 13:39:23.700288 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Nov 28 13:39:23 crc kubenswrapper[4857]: I1128 13:39:23.724661 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.857559566 podStartE2EDuration="47.724633514s" podCreationTimestamp="2025-11-28 13:38:36 +0000 UTC" firstStartedPulling="2025-11-28 13:38:37.716991486 +0000 UTC m=+1209.744366653" lastFinishedPulling="2025-11-28 13:39:22.584065434 +0000 UTC m=+1254.611440601" observedRunningTime="2025-11-28 13:39:23.712962256 +0000 UTC m=+1255.740337463" watchObservedRunningTime="2025-11-28 13:39:23.724633514 +0000 UTC m=+1255.752008721"
Nov 28 13:39:23 crc kubenswrapper[4857]: I1128 13:39:23.904911 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0"
Nov 28 13:39:23 crc kubenswrapper[4857]: I1128 13:39:23.905450 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0"
Nov 28 13:39:23 crc kubenswrapper[4857]: I1128 13:39:23.995448 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0"
Nov 28 13:39:24 crc kubenswrapper[4857]: I1128 13:39:24.800179 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0"
Nov 28 13:39:24 crc kubenswrapper[4857]: I1128 13:39:24.844472 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0"
Nov 28 13:39:24 crc kubenswrapper[4857]: I1128 13:39:24.844510 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0"
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.043935 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-z8bzr"]
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.045110 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-z8bzr"
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.054367 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-z8bzr"]
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.146843 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bh9k\" (UniqueName: \"kubernetes.io/projected/dff509a4-6719-4a14-9a20-f07a13717d90-kube-api-access-7bh9k\") pod \"placement-db-create-z8bzr\" (UID: \"dff509a4-6719-4a14-9a20-f07a13717d90\") " pod="openstack/placement-db-create-z8bzr"
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.146948 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dff509a4-6719-4a14-9a20-f07a13717d90-operator-scripts\") pod \"placement-db-create-z8bzr\" (UID: \"dff509a4-6719-4a14-9a20-f07a13717d90\") " pod="openstack/placement-db-create-z8bzr"
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.157145 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-cdcd-account-create-update-ljmhf"]
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.158210 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-cdcd-account-create-update-ljmhf"
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.164195 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret"
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.171128 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-cdcd-account-create-update-ljmhf"]
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.188949 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bc7876d45-m48cv"
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.248788 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7j9g5\" (UniqueName: \"kubernetes.io/projected/f33c02be-6f4d-4b53-b1fa-4b97297bde64-kube-api-access-7j9g5\") pod \"placement-cdcd-account-create-update-ljmhf\" (UID: \"f33c02be-6f4d-4b53-b1fa-4b97297bde64\") " pod="openstack/placement-cdcd-account-create-update-ljmhf"
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.248927 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f33c02be-6f4d-4b53-b1fa-4b97297bde64-operator-scripts\") pod \"placement-cdcd-account-create-update-ljmhf\" (UID: \"f33c02be-6f4d-4b53-b1fa-4b97297bde64\") " pod="openstack/placement-cdcd-account-create-update-ljmhf"
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.249007 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bh9k\" (UniqueName: \"kubernetes.io/projected/dff509a4-6719-4a14-9a20-f07a13717d90-kube-api-access-7bh9k\") pod \"placement-db-create-z8bzr\" (UID: \"dff509a4-6719-4a14-9a20-f07a13717d90\") " pod="openstack/placement-db-create-z8bzr"
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.249035 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dff509a4-6719-4a14-9a20-f07a13717d90-operator-scripts\") pod \"placement-db-create-z8bzr\" (UID: \"dff509a4-6719-4a14-9a20-f07a13717d90\") " pod="openstack/placement-db-create-z8bzr"
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.249899 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dff509a4-6719-4a14-9a20-f07a13717d90-operator-scripts\") pod \"placement-db-create-z8bzr\" (UID: \"dff509a4-6719-4a14-9a20-f07a13717d90\") " pod="openstack/placement-db-create-z8bzr"
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.289026 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bh9k\" (UniqueName: \"kubernetes.io/projected/dff509a4-6719-4a14-9a20-f07a13717d90-kube-api-access-7bh9k\") pod \"placement-db-create-z8bzr\" (UID: \"dff509a4-6719-4a14-9a20-f07a13717d90\") " pod="openstack/placement-db-create-z8bzr"
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.350880 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7j9g5\" (UniqueName: \"kubernetes.io/projected/f33c02be-6f4d-4b53-b1fa-4b97297bde64-kube-api-access-7j9g5\") pod \"placement-cdcd-account-create-update-ljmhf\" (UID: \"f33c02be-6f4d-4b53-b1fa-4b97297bde64\") " pod="openstack/placement-cdcd-account-create-update-ljmhf"
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.351007 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f33c02be-6f4d-4b53-b1fa-4b97297bde64-operator-scripts\") pod \"placement-cdcd-account-create-update-ljmhf\" (UID: \"f33c02be-6f4d-4b53-b1fa-4b97297bde64\") " pod="openstack/placement-cdcd-account-create-update-ljmhf"
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.352077 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f33c02be-6f4d-4b53-b1fa-4b97297bde64-operator-scripts\") pod \"placement-cdcd-account-create-update-ljmhf\" (UID: \"f33c02be-6f4d-4b53-b1fa-4b97297bde64\") " pod="openstack/placement-cdcd-account-create-update-ljmhf"
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.365356 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-z8bzr"
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.374991 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7j9g5\" (UniqueName: \"kubernetes.io/projected/f33c02be-6f4d-4b53-b1fa-4b97297bde64-kube-api-access-7j9g5\") pod \"placement-cdcd-account-create-update-ljmhf\" (UID: \"f33c02be-6f4d-4b53-b1fa-4b97297bde64\") " pod="openstack/placement-cdcd-account-create-update-ljmhf"
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.474511 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-cdcd-account-create-update-ljmhf"
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.724033 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8554648995-m8rq9"
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.787467 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-m48cv"]
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.787714 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bc7876d45-m48cv" podUID="fb09eb7d-1f7b-4613-900e-13de7585edef" containerName="dnsmasq-dns" containerID="cri-o://4c8ef78e5ebd9746257e5142ab650ba5f7fcf502cfa3c4479926e6293dabf8f4" gracePeriod=10
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.808843 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-cdcd-account-create-update-ljmhf"]
Nov 28 13:39:25 crc kubenswrapper[4857]: I1128 13:39:25.905681 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-z8bzr"]
Nov 28 13:39:25 crc kubenswrapper[4857]: W1128 13:39:25.913936 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddff509a4_6719_4a14_9a20_f07a13717d90.slice/crio-8eea1328074f90e5881143d7b1b2a56f9385b36ef44247bb7012727a0f46d58e WatchSource:0}: Error finding container 8eea1328074f90e5881143d7b1b2a56f9385b36ef44247bb7012727a0f46d58e: Status 404 returned error can't find the container with id 8eea1328074f90e5881143d7b1b2a56f9385b36ef44247bb7012727a0f46d58e
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.218199 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-m48cv"
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.266501 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fb09eb7d-1f7b-4613-900e-13de7585edef-ovsdbserver-sb\") pod \"fb09eb7d-1f7b-4613-900e-13de7585edef\" (UID: \"fb09eb7d-1f7b-4613-900e-13de7585edef\") "
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.266600 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fb09eb7d-1f7b-4613-900e-13de7585edef-dns-svc\") pod \"fb09eb7d-1f7b-4613-900e-13de7585edef\" (UID: \"fb09eb7d-1f7b-4613-900e-13de7585edef\") "
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.266663 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ghtmx\" (UniqueName: \"kubernetes.io/projected/fb09eb7d-1f7b-4613-900e-13de7585edef-kube-api-access-ghtmx\") pod \"fb09eb7d-1f7b-4613-900e-13de7585edef\" (UID: \"fb09eb7d-1f7b-4613-900e-13de7585edef\") "
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.266732 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb09eb7d-1f7b-4613-900e-13de7585edef-config\") pod \"fb09eb7d-1f7b-4613-900e-13de7585edef\" (UID: \"fb09eb7d-1f7b-4613-900e-13de7585edef\") "
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.272107 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb09eb7d-1f7b-4613-900e-13de7585edef-kube-api-access-ghtmx" (OuterVolumeSpecName: "kube-api-access-ghtmx") pod "fb09eb7d-1f7b-4613-900e-13de7585edef" (UID: "fb09eb7d-1f7b-4613-900e-13de7585edef"). InnerVolumeSpecName "kube-api-access-ghtmx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.308196 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb09eb7d-1f7b-4613-900e-13de7585edef-config" (OuterVolumeSpecName: "config") pod "fb09eb7d-1f7b-4613-900e-13de7585edef" (UID: "fb09eb7d-1f7b-4613-900e-13de7585edef"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.308902 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb09eb7d-1f7b-4613-900e-13de7585edef-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "fb09eb7d-1f7b-4613-900e-13de7585edef" (UID: "fb09eb7d-1f7b-4613-900e-13de7585edef"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.316452 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb09eb7d-1f7b-4613-900e-13de7585edef-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "fb09eb7d-1f7b-4613-900e-13de7585edef" (UID: "fb09eb7d-1f7b-4613-900e-13de7585edef"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.368553 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fb09eb7d-1f7b-4613-900e-13de7585edef-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.368578 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fb09eb7d-1f7b-4613-900e-13de7585edef-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.368589 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ghtmx\" (UniqueName: \"kubernetes.io/projected/fb09eb7d-1f7b-4613-900e-13de7585edef-kube-api-access-ghtmx\") on node \"crc\" DevicePath \"\""
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.368600 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb09eb7d-1f7b-4613-900e-13de7585edef-config\") on node \"crc\" DevicePath \"\""
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.729331 4857 generic.go:334] "Generic (PLEG): container finished" podID="fb09eb7d-1f7b-4613-900e-13de7585edef" containerID="4c8ef78e5ebd9746257e5142ab650ba5f7fcf502cfa3c4479926e6293dabf8f4" exitCode=0
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.729406 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-m48cv" event={"ID":"fb09eb7d-1f7b-4613-900e-13de7585edef","Type":"ContainerDied","Data":"4c8ef78e5ebd9746257e5142ab650ba5f7fcf502cfa3c4479926e6293dabf8f4"}
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.729439 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-m48cv" event={"ID":"fb09eb7d-1f7b-4613-900e-13de7585edef","Type":"ContainerDied","Data":"e71e86fe485d8efaee1ab2ba81a196f40ca4cfecb3f510f19918545fa4d9b61a"}
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.729462 4857 scope.go:117] "RemoveContainer" containerID="4c8ef78e5ebd9746257e5142ab650ba5f7fcf502cfa3c4479926e6293dabf8f4"
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.729608 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-m48cv"
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.752609 4857 generic.go:334] "Generic (PLEG): container finished" podID="f33c02be-6f4d-4b53-b1fa-4b97297bde64" containerID="6003a7f8ab27a4820565904d5d128a843aa2e8125b96ec24769c383a1f6201e3" exitCode=0
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.753024 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-cdcd-account-create-update-ljmhf" event={"ID":"f33c02be-6f4d-4b53-b1fa-4b97297bde64","Type":"ContainerDied","Data":"6003a7f8ab27a4820565904d5d128a843aa2e8125b96ec24769c383a1f6201e3"}
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.753078 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-cdcd-account-create-update-ljmhf" event={"ID":"f33c02be-6f4d-4b53-b1fa-4b97297bde64","Type":"ContainerStarted","Data":"336d98f39ea7eb1c16746641041802bc860a518ea40f56a6a973729e21b89fa2"}
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.764253 4857 generic.go:334] "Generic (PLEG): container finished" podID="dff509a4-6719-4a14-9a20-f07a13717d90" containerID="3284e67be7e4ba9585a3ba4373dbece92afd37195784b345ddad64c4d295b925" exitCode=0
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.765735 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-z8bzr" event={"ID":"dff509a4-6719-4a14-9a20-f07a13717d90","Type":"ContainerDied","Data":"3284e67be7e4ba9585a3ba4373dbece92afd37195784b345ddad64c4d295b925"}
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.765825 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-z8bzr" event={"ID":"dff509a4-6719-4a14-9a20-f07a13717d90","Type":"ContainerStarted","Data":"8eea1328074f90e5881143d7b1b2a56f9385b36ef44247bb7012727a0f46d58e"}
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.778266 4857 scope.go:117] "RemoveContainer" containerID="ed79b812c0c207711b11a01129dcb5f4901f87d22a6ee2ae72224edcb83ad121"
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.782066 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-m48cv"]
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.801941 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-m48cv"]
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.830892 4857 scope.go:117] "RemoveContainer" containerID="4c8ef78e5ebd9746257e5142ab650ba5f7fcf502cfa3c4479926e6293dabf8f4"
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.833363 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-ngqsk"]
Nov 28 13:39:26 crc kubenswrapper[4857]: E1128 13:39:26.835795 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb09eb7d-1f7b-4613-900e-13de7585edef" containerName="init"
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.835817 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb09eb7d-1f7b-4613-900e-13de7585edef" containerName="init"
Nov 28 13:39:26 crc kubenswrapper[4857]: E1128 13:39:26.835851 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb09eb7d-1f7b-4613-900e-13de7585edef" containerName="dnsmasq-dns"
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.835858 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb09eb7d-1f7b-4613-900e-13de7585edef" containerName="dnsmasq-dns"
Nov 28 13:39:26 crc kubenswrapper[4857]: E1128 13:39:26.835939 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c8ef78e5ebd9746257e5142ab650ba5f7fcf502cfa3c4479926e6293dabf8f4\": container with ID starting with 4c8ef78e5ebd9746257e5142ab650ba5f7fcf502cfa3c4479926e6293dabf8f4 not found: ID does not exist" containerID="4c8ef78e5ebd9746257e5142ab650ba5f7fcf502cfa3c4479926e6293dabf8f4"
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.835995 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c8ef78e5ebd9746257e5142ab650ba5f7fcf502cfa3c4479926e6293dabf8f4"} err="failed to get container status \"4c8ef78e5ebd9746257e5142ab650ba5f7fcf502cfa3c4479926e6293dabf8f4\": rpc error: code = NotFound desc = could not find container \"4c8ef78e5ebd9746257e5142ab650ba5f7fcf502cfa3c4479926e6293dabf8f4\": container with ID starting with 4c8ef78e5ebd9746257e5142ab650ba5f7fcf502cfa3c4479926e6293dabf8f4 not found: ID does not exist"
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.836026 4857 scope.go:117] "RemoveContainer" containerID="ed79b812c0c207711b11a01129dcb5f4901f87d22a6ee2ae72224edcb83ad121"
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.836060 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb09eb7d-1f7b-4613-900e-13de7585edef" containerName="dnsmasq-dns"
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.836938 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk"
Nov 28 13:39:26 crc kubenswrapper[4857]: E1128 13:39:26.840918 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed79b812c0c207711b11a01129dcb5f4901f87d22a6ee2ae72224edcb83ad121\": container with ID starting with ed79b812c0c207711b11a01129dcb5f4901f87d22a6ee2ae72224edcb83ad121 not found: ID does not exist" containerID="ed79b812c0c207711b11a01129dcb5f4901f87d22a6ee2ae72224edcb83ad121"
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.840982 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed79b812c0c207711b11a01129dcb5f4901f87d22a6ee2ae72224edcb83ad121"} err="failed to get container status \"ed79b812c0c207711b11a01129dcb5f4901f87d22a6ee2ae72224edcb83ad121\": rpc error: code = NotFound desc = could not find container \"ed79b812c0c207711b11a01129dcb5f4901f87d22a6ee2ae72224edcb83ad121\": container with ID starting with ed79b812c0c207711b11a01129dcb5f4901f87d22a6ee2ae72224edcb83ad121 not found: ID does not exist"
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.886564 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-ngqsk"]
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.979518 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdgkb\" (UniqueName: \"kubernetes.io/projected/c61bf456-25ac-453e-87cf-e0694d637c22-kube-api-access-zdgkb\") pod \"dnsmasq-dns-b8fbc5445-ngqsk\" (UID: \"c61bf456-25ac-453e-87cf-e0694d637c22\") " pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk"
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.979566 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-ngqsk\" (UID: \"c61bf456-25ac-453e-87cf-e0694d637c22\") " pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk"
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.979593 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-ngqsk\" (UID: \"c61bf456-25ac-453e-87cf-e0694d637c22\") " pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk"
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.979732 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-config\") pod \"dnsmasq-dns-b8fbc5445-ngqsk\" (UID: \"c61bf456-25ac-453e-87cf-e0694d637c22\") " pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk"
Nov 28 13:39:26 crc kubenswrapper[4857]: I1128 13:39:26.979839 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-ngqsk\" (UID: \"c61bf456-25ac-453e-87cf-e0694d637c22\") " pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk"
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.063791 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.081362 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-config\") pod \"dnsmasq-dns-b8fbc5445-ngqsk\" (UID: \"c61bf456-25ac-453e-87cf-e0694d637c22\") " pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk"
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.081447 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-ngqsk\" (UID: \"c61bf456-25ac-453e-87cf-e0694d637c22\") " pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk"
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.081548 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdgkb\" (UniqueName: \"kubernetes.io/projected/c61bf456-25ac-453e-87cf-e0694d637c22-kube-api-access-zdgkb\") pod \"dnsmasq-dns-b8fbc5445-ngqsk\" (UID: \"c61bf456-25ac-453e-87cf-e0694d637c22\") " pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk"
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.081575 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-ngqsk\" (UID: \"c61bf456-25ac-453e-87cf-e0694d637c22\") " pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk"
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.081605 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-ngqsk\" (UID: \"c61bf456-25ac-453e-87cf-e0694d637c22\") " pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk"
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.082555 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-config\") pod \"dnsmasq-dns-b8fbc5445-ngqsk\" (UID: \"c61bf456-25ac-453e-87cf-e0694d637c22\") " pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk"
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.084165 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-ngqsk\" (UID: \"c61bf456-25ac-453e-87cf-e0694d637c22\") " pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk"
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.084742 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-ngqsk\" (UID: \"c61bf456-25ac-453e-87cf-e0694d637c22\") " pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk"
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.084837 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-ngqsk\" (UID: \"c61bf456-25ac-453e-87cf-e0694d637c22\") " pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk"
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.104415 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdgkb\" (UniqueName: \"kubernetes.io/projected/c61bf456-25ac-453e-87cf-e0694d637c22-kube-api-access-zdgkb\") pod \"dnsmasq-dns-b8fbc5445-ngqsk\" (UID: \"c61bf456-25ac-453e-87cf-e0694d637c22\") " pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk"
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.136945 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.177825 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk"
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.635812 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-ngqsk"]
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.772922 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk" event={"ID":"c61bf456-25ac-453e-87cf-e0694d637c22","Type":"ContainerStarted","Data":"b51068fb79628ae3750196307117b007388c92165f008e001ab06ac088c5dec7"}
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.962333 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"]
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.979196 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0"
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.983582 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-jh8ng"
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.983765 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files"
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.983744 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf"
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.983821 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data"
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.997137 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " pod="openstack/swift-storage-0"
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.997188 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wn8tz\" (UniqueName: \"kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-kube-api-access-wn8tz\") pod \"swift-storage-0\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " pod="openstack/swift-storage-0"
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.997254 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-etc-swift\") pod \"swift-storage-0\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " pod="openstack/swift-storage-0"
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.997308 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/d5b28d1e-e702-4528-9964-72ad176a20b3-lock\") pod \"swift-storage-0\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " pod="openstack/swift-storage-0"
Nov 28 13:39:27 crc kubenswrapper[4857]: I1128 13:39:27.997332 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/d5b28d1e-e702-4528-9964-72ad176a20b3-cache\") pod \"swift-storage-0\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " pod="openstack/swift-storage-0"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.004544 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"]
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.099045 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/d5b28d1e-e702-4528-9964-72ad176a20b3-lock\") pod \"swift-storage-0\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " pod="openstack/swift-storage-0"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.099101 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/d5b28d1e-e702-4528-9964-72ad176a20b3-cache\") pod \"swift-storage-0\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " pod="openstack/swift-storage-0"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.099196 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " pod="openstack/swift-storage-0"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.099257 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wn8tz\" (UniqueName: \"kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-kube-api-access-wn8tz\") pod \"swift-storage-0\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " pod="openstack/swift-storage-0"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.099692 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/swift-storage-0"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.099917 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/d5b28d1e-e702-4528-9964-72ad176a20b3-lock\") pod \"swift-storage-0\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " pod="openstack/swift-storage-0"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.099991 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-etc-swift\") pod \"swift-storage-0\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " pod="openstack/swift-storage-0"
Nov 28 13:39:28 crc kubenswrapper[4857]: E1128 13:39:28.100147 4857 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 28 13:39:28 crc kubenswrapper[4857]: E1128 13:39:28.100246 4857 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 28 13:39:28 crc kubenswrapper[4857]: E1128 13:39:28.100304 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-etc-swift podName:d5b28d1e-e702-4528-9964-72ad176a20b3 nodeName:}" failed. No retries permitted until 2025-11-28 13:39:28.600282345 +0000 UTC m=+1260.627657512 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-etc-swift") pod "swift-storage-0" (UID: "d5b28d1e-e702-4528-9964-72ad176a20b3") : configmap "swift-ring-files" not found
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.100466 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/d5b28d1e-e702-4528-9964-72ad176a20b3-cache\") pod \"swift-storage-0\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " pod="openstack/swift-storage-0"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.123444 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-cdcd-account-create-update-ljmhf"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.123993 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wn8tz\" (UniqueName: \"kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-kube-api-access-wn8tz\") pod \"swift-storage-0\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " pod="openstack/swift-storage-0"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.126069 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " pod="openstack/swift-storage-0"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.157989 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-z8bzr"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.305227 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7j9g5\" (UniqueName: \"kubernetes.io/projected/f33c02be-6f4d-4b53-b1fa-4b97297bde64-kube-api-access-7j9g5\") pod \"f33c02be-6f4d-4b53-b1fa-4b97297bde64\" (UID: \"f33c02be-6f4d-4b53-b1fa-4b97297bde64\") "
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.305396 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dff509a4-6719-4a14-9a20-f07a13717d90-operator-scripts\") pod \"dff509a4-6719-4a14-9a20-f07a13717d90\" (UID: \"dff509a4-6719-4a14-9a20-f07a13717d90\") "
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.305444 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7bh9k\" (UniqueName: \"kubernetes.io/projected/dff509a4-6719-4a14-9a20-f07a13717d90-kube-api-access-7bh9k\") pod \"dff509a4-6719-4a14-9a20-f07a13717d90\" (UID: \"dff509a4-6719-4a14-9a20-f07a13717d90\") "
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.305863 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f33c02be-6f4d-4b53-b1fa-4b97297bde64-operator-scripts\") pod \"f33c02be-6f4d-4b53-b1fa-4b97297bde64\" (UID: \"f33c02be-6f4d-4b53-b1fa-4b97297bde64\") "
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.306121 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dff509a4-6719-4a14-9a20-f07a13717d90-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "dff509a4-6719-4a14-9a20-f07a13717d90" (UID: "dff509a4-6719-4a14-9a20-f07a13717d90"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.306803 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dff509a4-6719-4a14-9a20-f07a13717d90-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.306865 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f33c02be-6f4d-4b53-b1fa-4b97297bde64-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f33c02be-6f4d-4b53-b1fa-4b97297bde64" (UID: "f33c02be-6f4d-4b53-b1fa-4b97297bde64"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.310002 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dff509a4-6719-4a14-9a20-f07a13717d90-kube-api-access-7bh9k" (OuterVolumeSpecName: "kube-api-access-7bh9k") pod "dff509a4-6719-4a14-9a20-f07a13717d90" (UID: "dff509a4-6719-4a14-9a20-f07a13717d90"). InnerVolumeSpecName "kube-api-access-7bh9k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.311226 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f33c02be-6f4d-4b53-b1fa-4b97297bde64-kube-api-access-7j9g5" (OuterVolumeSpecName: "kube-api-access-7j9g5") pod "f33c02be-6f4d-4b53-b1fa-4b97297bde64" (UID: "f33c02be-6f4d-4b53-b1fa-4b97297bde64"). InnerVolumeSpecName "kube-api-access-7j9g5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.332208 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb09eb7d-1f7b-4613-900e-13de7585edef" path="/var/lib/kubelet/pods/fb09eb7d-1f7b-4613-900e-13de7585edef/volumes"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.408761 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f33c02be-6f4d-4b53-b1fa-4b97297bde64-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.408807 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7j9g5\" (UniqueName: \"kubernetes.io/projected/f33c02be-6f4d-4b53-b1fa-4b97297bde64-kube-api-access-7j9g5\") on node \"crc\" DevicePath \"\""
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.408819 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7bh9k\" (UniqueName: \"kubernetes.io/projected/dff509a4-6719-4a14-9a20-f07a13717d90-kube-api-access-7bh9k\") on node \"crc\" DevicePath \"\""
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.438971 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-xhgkc"]
Nov 28 13:39:28 crc kubenswrapper[4857]: E1128 13:39:28.439281 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f33c02be-6f4d-4b53-b1fa-4b97297bde64" containerName="mariadb-account-create-update"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.439301 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f33c02be-6f4d-4b53-b1fa-4b97297bde64" containerName="mariadb-account-create-update"
Nov 28 13:39:28 crc kubenswrapper[4857]: E1128 13:39:28.439310 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dff509a4-6719-4a14-9a20-f07a13717d90" containerName="mariadb-database-create"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.439317 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="dff509a4-6719-4a14-9a20-f07a13717d90" containerName="mariadb-database-create"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.439479 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f33c02be-6f4d-4b53-b1fa-4b97297bde64" containerName="mariadb-account-create-update"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.439504 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="dff509a4-6719-4a14-9a20-f07a13717d90" containerName="mariadb-database-create"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.440022 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-xhgkc"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.444313 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.444537 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.450265 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.453943 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-xhgkc"]
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.611683 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b208ca3d-5127-4e9d-ba17-a68dc507f085-scripts\") pod \"swift-ring-rebalance-xhgkc\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " pod="openstack/swift-ring-rebalance-xhgkc"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.612164 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b208ca3d-5127-4e9d-ba17-a68dc507f085-swiftconf\") pod \"swift-ring-rebalance-xhgkc\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " pod="openstack/swift-ring-rebalance-xhgkc"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.612206 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-etc-swift\") pod \"swift-storage-0\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " pod="openstack/swift-storage-0"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.612237 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b208ca3d-5127-4e9d-ba17-a68dc507f085-ring-data-devices\") pod \"swift-ring-rebalance-xhgkc\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " pod="openstack/swift-ring-rebalance-xhgkc"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.612271 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdwx6\" (UniqueName: \"kubernetes.io/projected/b208ca3d-5127-4e9d-ba17-a68dc507f085-kube-api-access-kdwx6\") pod \"swift-ring-rebalance-xhgkc\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " pod="openstack/swift-ring-rebalance-xhgkc"
Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.612352 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b208ca3d-5127-4e9d-ba17-a68dc507f085-combined-ca-bundle\") pod \"swift-ring-rebalance-xhgkc\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " pod="openstack/swift-ring-rebalance-xhgkc"
Nov 28 13:39:28 crc kubenswrapper[4857]: E1128 13:39:28.612462 4857 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 28 13:39:28 crc kubenswrapper[4857]: E1128 13:39:28.612492 4857 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not
found Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.612495 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b208ca3d-5127-4e9d-ba17-a68dc507f085-dispersionconf\") pod \"swift-ring-rebalance-xhgkc\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " pod="openstack/swift-ring-rebalance-xhgkc" Nov 28 13:39:28 crc kubenswrapper[4857]: E1128 13:39:28.612547 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-etc-swift podName:d5b28d1e-e702-4528-9964-72ad176a20b3 nodeName:}" failed. No retries permitted until 2025-11-28 13:39:29.612525561 +0000 UTC m=+1261.639900808 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-etc-swift") pod "swift-storage-0" (UID: "d5b28d1e-e702-4528-9964-72ad176a20b3") : configmap "swift-ring-files" not found Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.612567 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b208ca3d-5127-4e9d-ba17-a68dc507f085-etc-swift\") pod \"swift-ring-rebalance-xhgkc\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " pod="openstack/swift-ring-rebalance-xhgkc" Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.713862 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b208ca3d-5127-4e9d-ba17-a68dc507f085-dispersionconf\") pod \"swift-ring-rebalance-xhgkc\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " pod="openstack/swift-ring-rebalance-xhgkc" Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.713916 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b208ca3d-5127-4e9d-ba17-a68dc507f085-etc-swift\") pod \"swift-ring-rebalance-xhgkc\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " pod="openstack/swift-ring-rebalance-xhgkc" Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.713958 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b208ca3d-5127-4e9d-ba17-a68dc507f085-scripts\") pod \"swift-ring-rebalance-xhgkc\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " pod="openstack/swift-ring-rebalance-xhgkc" Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.714009 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b208ca3d-5127-4e9d-ba17-a68dc507f085-swiftconf\") pod \"swift-ring-rebalance-xhgkc\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " pod="openstack/swift-ring-rebalance-xhgkc" Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.714046 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b208ca3d-5127-4e9d-ba17-a68dc507f085-ring-data-devices\") pod \"swift-ring-rebalance-xhgkc\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " pod="openstack/swift-ring-rebalance-xhgkc" Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.714077 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdwx6\" (UniqueName: 
\"kubernetes.io/projected/b208ca3d-5127-4e9d-ba17-a68dc507f085-kube-api-access-kdwx6\") pod \"swift-ring-rebalance-xhgkc\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " pod="openstack/swift-ring-rebalance-xhgkc" Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.714120 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b208ca3d-5127-4e9d-ba17-a68dc507f085-combined-ca-bundle\") pod \"swift-ring-rebalance-xhgkc\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " pod="openstack/swift-ring-rebalance-xhgkc" Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.714721 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b208ca3d-5127-4e9d-ba17-a68dc507f085-etc-swift\") pod \"swift-ring-rebalance-xhgkc\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " pod="openstack/swift-ring-rebalance-xhgkc" Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.714905 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b208ca3d-5127-4e9d-ba17-a68dc507f085-scripts\") pod \"swift-ring-rebalance-xhgkc\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " pod="openstack/swift-ring-rebalance-xhgkc" Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.715098 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b208ca3d-5127-4e9d-ba17-a68dc507f085-ring-data-devices\") pod \"swift-ring-rebalance-xhgkc\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " pod="openstack/swift-ring-rebalance-xhgkc" Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.718333 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b208ca3d-5127-4e9d-ba17-a68dc507f085-dispersionconf\") pod \"swift-ring-rebalance-xhgkc\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " pod="openstack/swift-ring-rebalance-xhgkc" Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.719052 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b208ca3d-5127-4e9d-ba17-a68dc507f085-combined-ca-bundle\") pod \"swift-ring-rebalance-xhgkc\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " pod="openstack/swift-ring-rebalance-xhgkc" Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.719414 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b208ca3d-5127-4e9d-ba17-a68dc507f085-swiftconf\") pod \"swift-ring-rebalance-xhgkc\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " pod="openstack/swift-ring-rebalance-xhgkc" Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.740870 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdwx6\" (UniqueName: \"kubernetes.io/projected/b208ca3d-5127-4e9d-ba17-a68dc507f085-kube-api-access-kdwx6\") pod \"swift-ring-rebalance-xhgkc\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " pod="openstack/swift-ring-rebalance-xhgkc" Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.764598 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-jh8ng" Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.772840 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-xhgkc" Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.787403 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-z8bzr" event={"ID":"dff509a4-6719-4a14-9a20-f07a13717d90","Type":"ContainerDied","Data":"8eea1328074f90e5881143d7b1b2a56f9385b36ef44247bb7012727a0f46d58e"} Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.787453 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8eea1328074f90e5881143d7b1b2a56f9385b36ef44247bb7012727a0f46d58e" Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.787503 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-z8bzr" Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.790307 4857 generic.go:334] "Generic (PLEG): container finished" podID="c61bf456-25ac-453e-87cf-e0694d637c22" containerID="1432dae7d3eda2dff4167d74c6617d9c5bf396db3afbecfec74f584a276a961e" exitCode=0 Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.790398 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk" event={"ID":"c61bf456-25ac-453e-87cf-e0694d637c22","Type":"ContainerDied","Data":"1432dae7d3eda2dff4167d74c6617d9c5bf396db3afbecfec74f584a276a961e"} Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.795296 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-cdcd-account-create-update-ljmhf" event={"ID":"f33c02be-6f4d-4b53-b1fa-4b97297bde64","Type":"ContainerDied","Data":"336d98f39ea7eb1c16746641041802bc860a518ea40f56a6a973729e21b89fa2"} Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.795327 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-cdcd-account-create-update-ljmhf" Nov 28 13:39:28 crc kubenswrapper[4857]: I1128 13:39:28.795350 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="336d98f39ea7eb1c16746641041802bc860a518ea40f56a6a973729e21b89fa2" Nov 28 13:39:29 crc kubenswrapper[4857]: I1128 13:39:29.245651 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-xhgkc"] Nov 28 13:39:29 crc kubenswrapper[4857]: W1128 13:39:29.252963 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb208ca3d_5127_4e9d_ba17_a68dc507f085.slice/crio-51e63bae5a9e13b73beb4bf98d146874e03ae9998a3b131b839483049f93aa5d WatchSource:0}: Error finding container 51e63bae5a9e13b73beb4bf98d146874e03ae9998a3b131b839483049f93aa5d: Status 404 returned error can't find the container with id 51e63bae5a9e13b73beb4bf98d146874e03ae9998a3b131b839483049f93aa5d Nov 28 13:39:29 crc kubenswrapper[4857]: I1128 13:39:29.631446 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-etc-swift\") pod \"swift-storage-0\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " pod="openstack/swift-storage-0" Nov 28 13:39:29 crc kubenswrapper[4857]: E1128 13:39:29.631709 4857 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 13:39:29 crc kubenswrapper[4857]: E1128 13:39:29.631742 4857 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 13:39:29 crc kubenswrapper[4857]: E1128 13:39:29.631823 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-etc-swift podName:d5b28d1e-e702-4528-9964-72ad176a20b3 nodeName:}" failed. No retries permitted until 2025-11-28 13:39:31.631799881 +0000 UTC m=+1263.659175038 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-etc-swift") pod "swift-storage-0" (UID: "d5b28d1e-e702-4528-9964-72ad176a20b3") : configmap "swift-ring-files" not found
Nov 28 13:39:29 crc kubenswrapper[4857]: I1128 13:39:29.804829 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-xhgkc" event={"ID":"b208ca3d-5127-4e9d-ba17-a68dc507f085","Type":"ContainerStarted","Data":"51e63bae5a9e13b73beb4bf98d146874e03ae9998a3b131b839483049f93aa5d"}
Nov 28 13:39:29 crc kubenswrapper[4857]: I1128 13:39:29.807428 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk" event={"ID":"c61bf456-25ac-453e-87cf-e0694d637c22","Type":"ContainerStarted","Data":"a52115381f0e1bb2ee8bc7978d1269438baa8c3a8f4f36bf04bebd70ee0ffa32"}
Nov 28 13:39:29 crc kubenswrapper[4857]: I1128 13:39:29.807587 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk"
Nov 28 13:39:29 crc kubenswrapper[4857]: I1128 13:39:29.825377 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk" podStartSLOduration=3.8253575140000002 podStartE2EDuration="3.825357514s" podCreationTimestamp="2025-11-28 13:39:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:39:29.822989295 +0000 UTC m=+1261.850364472" watchObservedRunningTime="2025-11-28 13:39:29.825357514 +0000 UTC m=+1261.852732701"
Nov 28 13:39:30 crc kubenswrapper[4857]: I1128 13:39:30.219707 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-r57mh"]
Nov 28 13:39:30 crc kubenswrapper[4857]: I1128 13:39:30.220726 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-r57mh"
Nov 28 13:39:30 crc kubenswrapper[4857]: I1128 13:39:30.233292 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-r57mh"]
Nov 28 13:39:30 crc kubenswrapper[4857]: I1128 13:39:30.328843 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-eef9-account-create-update-kg8k5"]
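
Note the durationBeforeRetry values for the same etc-swift operation: 500ms at 13:39:28, then 1s, then the 2s just above, with 4s and 8s further down. nestedpendingoperations requeues each failed volume operation with an exponentially growing delay, so a missing ConfigMap never turns into a hot retry loop. A small sketch of that schedule, assuming factor-2 growth from 500ms (which matches this log) and a cap that this excerpt does not show:

// Minimal sketch of the retry pattern visible in the nestedpendingoperations
// lines: each failed MountVolume.SetUp doubles the wait (500ms, 1s, 2s, 4s,
// 8s, ...). The factor-2 growth matches this log; the cap chosen here is an
// assumption, not something this excerpt shows.
package main

import (
	"fmt"
	"time"
)

type backoff struct {
	delay, max time.Duration
}

// next returns the wait before the upcoming retry, then doubles the delay
// for the following attempt, up to the configured cap.
func (b *backoff) next() time.Duration {
	d := b.delay
	if b.delay < b.max {
		b.delay *= 2
	}
	return d
}

func main() {
	b := &backoff{delay: 500 * time.Millisecond, max: 2 * time.Minute}
	for i := 1; i <= 5; i++ {
		fmt.Printf("retry %d after %v\n", i, b.next())
	}
	// Prints 500ms, 1s, 2s, 4s, 8s: the same durationBeforeRetry sequence
	// the kubelet logs for the etc-swift volume of swift-storage-0.
}
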
Need to start a new one" pod="openstack/glance-eef9-account-create-update-kg8k5" Nov 28 13:39:30 crc kubenswrapper[4857]: I1128 13:39:30.332475 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 28 13:39:30 crc kubenswrapper[4857]: I1128 13:39:30.339122 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-eef9-account-create-update-kg8k5"] Nov 28 13:39:30 crc kubenswrapper[4857]: I1128 13:39:30.340342 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6da2e15b-2a40-4bf6-8502-5ac68921b525-operator-scripts\") pod \"glance-db-create-r57mh\" (UID: \"6da2e15b-2a40-4bf6-8502-5ac68921b525\") " pod="openstack/glance-db-create-r57mh" Nov 28 13:39:30 crc kubenswrapper[4857]: I1128 13:39:30.340486 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5tmm7\" (UniqueName: \"kubernetes.io/projected/6da2e15b-2a40-4bf6-8502-5ac68921b525-kube-api-access-5tmm7\") pod \"glance-db-create-r57mh\" (UID: \"6da2e15b-2a40-4bf6-8502-5ac68921b525\") " pod="openstack/glance-db-create-r57mh" Nov 28 13:39:30 crc kubenswrapper[4857]: I1128 13:39:30.442134 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5tmm7\" (UniqueName: \"kubernetes.io/projected/6da2e15b-2a40-4bf6-8502-5ac68921b525-kube-api-access-5tmm7\") pod \"glance-db-create-r57mh\" (UID: \"6da2e15b-2a40-4bf6-8502-5ac68921b525\") " pod="openstack/glance-db-create-r57mh" Nov 28 13:39:30 crc kubenswrapper[4857]: I1128 13:39:30.442252 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqzkd\" (UniqueName: \"kubernetes.io/projected/96945c0e-06fd-4880-9fc5-b1be1e15474d-kube-api-access-sqzkd\") pod \"glance-eef9-account-create-update-kg8k5\" (UID: \"96945c0e-06fd-4880-9fc5-b1be1e15474d\") " pod="openstack/glance-eef9-account-create-update-kg8k5" Nov 28 13:39:30 crc kubenswrapper[4857]: I1128 13:39:30.442289 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/96945c0e-06fd-4880-9fc5-b1be1e15474d-operator-scripts\") pod \"glance-eef9-account-create-update-kg8k5\" (UID: \"96945c0e-06fd-4880-9fc5-b1be1e15474d\") " pod="openstack/glance-eef9-account-create-update-kg8k5" Nov 28 13:39:30 crc kubenswrapper[4857]: I1128 13:39:30.442310 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6da2e15b-2a40-4bf6-8502-5ac68921b525-operator-scripts\") pod \"glance-db-create-r57mh\" (UID: \"6da2e15b-2a40-4bf6-8502-5ac68921b525\") " pod="openstack/glance-db-create-r57mh" Nov 28 13:39:30 crc kubenswrapper[4857]: I1128 13:39:30.443167 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6da2e15b-2a40-4bf6-8502-5ac68921b525-operator-scripts\") pod \"glance-db-create-r57mh\" (UID: \"6da2e15b-2a40-4bf6-8502-5ac68921b525\") " pod="openstack/glance-db-create-r57mh" Nov 28 13:39:30 crc kubenswrapper[4857]: I1128 13:39:30.463325 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5tmm7\" (UniqueName: \"kubernetes.io/projected/6da2e15b-2a40-4bf6-8502-5ac68921b525-kube-api-access-5tmm7\") pod \"glance-db-create-r57mh\" (UID: 
\"6da2e15b-2a40-4bf6-8502-5ac68921b525\") " pod="openstack/glance-db-create-r57mh" Nov 28 13:39:30 crc kubenswrapper[4857]: I1128 13:39:30.543606 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqzkd\" (UniqueName: \"kubernetes.io/projected/96945c0e-06fd-4880-9fc5-b1be1e15474d-kube-api-access-sqzkd\") pod \"glance-eef9-account-create-update-kg8k5\" (UID: \"96945c0e-06fd-4880-9fc5-b1be1e15474d\") " pod="openstack/glance-eef9-account-create-update-kg8k5" Nov 28 13:39:30 crc kubenswrapper[4857]: I1128 13:39:30.543674 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/96945c0e-06fd-4880-9fc5-b1be1e15474d-operator-scripts\") pod \"glance-eef9-account-create-update-kg8k5\" (UID: \"96945c0e-06fd-4880-9fc5-b1be1e15474d\") " pod="openstack/glance-eef9-account-create-update-kg8k5" Nov 28 13:39:30 crc kubenswrapper[4857]: I1128 13:39:30.544658 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/96945c0e-06fd-4880-9fc5-b1be1e15474d-operator-scripts\") pod \"glance-eef9-account-create-update-kg8k5\" (UID: \"96945c0e-06fd-4880-9fc5-b1be1e15474d\") " pod="openstack/glance-eef9-account-create-update-kg8k5" Nov 28 13:39:30 crc kubenswrapper[4857]: I1128 13:39:30.545266 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-r57mh" Nov 28 13:39:30 crc kubenswrapper[4857]: I1128 13:39:30.565980 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqzkd\" (UniqueName: \"kubernetes.io/projected/96945c0e-06fd-4880-9fc5-b1be1e15474d-kube-api-access-sqzkd\") pod \"glance-eef9-account-create-update-kg8k5\" (UID: \"96945c0e-06fd-4880-9fc5-b1be1e15474d\") " pod="openstack/glance-eef9-account-create-update-kg8k5" Nov 28 13:39:30 crc kubenswrapper[4857]: I1128 13:39:30.639201 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 28 13:39:30 crc kubenswrapper[4857]: I1128 13:39:30.663439 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-eef9-account-create-update-kg8k5" Nov 28 13:39:31 crc kubenswrapper[4857]: I1128 13:39:31.010387 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-r57mh"] Nov 28 13:39:31 crc kubenswrapper[4857]: W1128 13:39:31.012638 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6da2e15b_2a40_4bf6_8502_5ac68921b525.slice/crio-bed6f6494728e63d67042639cf0804663c1f18feccab03236e4c3937145fb828 WatchSource:0}: Error finding container bed6f6494728e63d67042639cf0804663c1f18feccab03236e4c3937145fb828: Status 404 returned error can't find the container with id bed6f6494728e63d67042639cf0804663c1f18feccab03236e4c3937145fb828 Nov 28 13:39:31 crc kubenswrapper[4857]: I1128 13:39:31.167531 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-eef9-account-create-update-kg8k5"] Nov 28 13:39:31 crc kubenswrapper[4857]: I1128 13:39:31.664440 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-etc-swift\") pod \"swift-storage-0\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " pod="openstack/swift-storage-0" Nov 28 13:39:31 crc kubenswrapper[4857]: E1128 13:39:31.664715 4857 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 13:39:31 crc kubenswrapper[4857]: E1128 13:39:31.664784 4857 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 13:39:31 crc kubenswrapper[4857]: E1128 13:39:31.664864 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-etc-swift podName:d5b28d1e-e702-4528-9964-72ad176a20b3 nodeName:}" failed. No retries permitted until 2025-11-28 13:39:35.664836561 +0000 UTC m=+1267.692211768 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-etc-swift") pod "swift-storage-0" (UID: "d5b28d1e-e702-4528-9964-72ad176a20b3") : configmap "swift-ring-files" not found Nov 28 13:39:31 crc kubenswrapper[4857]: W1128 13:39:31.721710 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod96945c0e_06fd_4880_9fc5_b1be1e15474d.slice/crio-32fae87a305073093c32227db239b5f6e40126ebf57921cc0d8941c916f8972b WatchSource:0}: Error finding container 32fae87a305073093c32227db239b5f6e40126ebf57921cc0d8941c916f8972b: Status 404 returned error can't find the container with id 32fae87a305073093c32227db239b5f6e40126ebf57921cc0d8941c916f8972b Nov 28 13:39:31 crc kubenswrapper[4857]: I1128 13:39:31.828055 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-r57mh" event={"ID":"6da2e15b-2a40-4bf6-8502-5ac68921b525","Type":"ContainerStarted","Data":"bed6f6494728e63d67042639cf0804663c1f18feccab03236e4c3937145fb828"} Nov 28 13:39:31 crc kubenswrapper[4857]: I1128 13:39:31.830002 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-eef9-account-create-update-kg8k5" event={"ID":"96945c0e-06fd-4880-9fc5-b1be1e15474d","Type":"ContainerStarted","Data":"32fae87a305073093c32227db239b5f6e40126ebf57921cc0d8941c916f8972b"} Nov 28 13:39:32 crc kubenswrapper[4857]: I1128 13:39:32.839278 4857 generic.go:334] "Generic (PLEG): container finished" podID="96945c0e-06fd-4880-9fc5-b1be1e15474d" containerID="c3b960d83097e2d52058b222703bf37d4168bd557a57337292c4ea2a1d269c32" exitCode=0 Nov 28 13:39:32 crc kubenswrapper[4857]: I1128 13:39:32.839368 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-eef9-account-create-update-kg8k5" event={"ID":"96945c0e-06fd-4880-9fc5-b1be1e15474d","Type":"ContainerDied","Data":"c3b960d83097e2d52058b222703bf37d4168bd557a57337292c4ea2a1d269c32"} Nov 28 13:39:32 crc kubenswrapper[4857]: I1128 13:39:32.841811 4857 generic.go:334] "Generic (PLEG): container finished" podID="6da2e15b-2a40-4bf6-8502-5ac68921b525" containerID="374e5e270733150ea249d79e7fb14a49da907af6ce513d9434f187161e66df2b" exitCode=0 Nov 28 13:39:32 crc kubenswrapper[4857]: I1128 13:39:32.841851 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-r57mh" event={"ID":"6da2e15b-2a40-4bf6-8502-5ac68921b525","Type":"ContainerDied","Data":"374e5e270733150ea249d79e7fb14a49da907af6ce513d9434f187161e66df2b"} Nov 28 13:39:34 crc kubenswrapper[4857]: I1128 13:39:34.622394 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-gw7vr"] Nov 28 13:39:34 crc kubenswrapper[4857]: I1128 13:39:34.627048 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-gw7vr" Nov 28 13:39:34 crc kubenswrapper[4857]: I1128 13:39:34.634639 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-gw7vr"] Nov 28 13:39:34 crc kubenswrapper[4857]: I1128 13:39:34.736875 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-0c6f-account-create-update-r8x7m"] Nov 28 13:39:34 crc kubenswrapper[4857]: I1128 13:39:34.738892 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-0c6f-account-create-update-r8x7m" Nov 28 13:39:34 crc kubenswrapper[4857]: I1128 13:39:34.739923 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ndwn\" (UniqueName: \"kubernetes.io/projected/624fc019-ce85-46f4-b6b9-fc4e5f4fdfac-kube-api-access-9ndwn\") pod \"keystone-db-create-gw7vr\" (UID: \"624fc019-ce85-46f4-b6b9-fc4e5f4fdfac\") " pod="openstack/keystone-db-create-gw7vr" Nov 28 13:39:34 crc kubenswrapper[4857]: I1128 13:39:34.740022 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/624fc019-ce85-46f4-b6b9-fc4e5f4fdfac-operator-scripts\") pod \"keystone-db-create-gw7vr\" (UID: \"624fc019-ce85-46f4-b6b9-fc4e5f4fdfac\") " pod="openstack/keystone-db-create-gw7vr" Nov 28 13:39:34 crc kubenswrapper[4857]: I1128 13:39:34.741296 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 28 13:39:34 crc kubenswrapper[4857]: I1128 13:39:34.744258 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-0c6f-account-create-update-r8x7m"] Nov 28 13:39:34 crc kubenswrapper[4857]: I1128 13:39:34.842466 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ndwn\" (UniqueName: \"kubernetes.io/projected/624fc019-ce85-46f4-b6b9-fc4e5f4fdfac-kube-api-access-9ndwn\") pod \"keystone-db-create-gw7vr\" (UID: \"624fc019-ce85-46f4-b6b9-fc4e5f4fdfac\") " pod="openstack/keystone-db-create-gw7vr" Nov 28 13:39:34 crc kubenswrapper[4857]: I1128 13:39:34.842551 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/624fc019-ce85-46f4-b6b9-fc4e5f4fdfac-operator-scripts\") pod \"keystone-db-create-gw7vr\" (UID: \"624fc019-ce85-46f4-b6b9-fc4e5f4fdfac\") " pod="openstack/keystone-db-create-gw7vr" Nov 28 13:39:34 crc kubenswrapper[4857]: I1128 13:39:34.842620 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8e187383-f03d-483d-bb3a-afe69f2a9d73-operator-scripts\") pod \"keystone-0c6f-account-create-update-r8x7m\" (UID: \"8e187383-f03d-483d-bb3a-afe69f2a9d73\") " pod="openstack/keystone-0c6f-account-create-update-r8x7m" Nov 28 13:39:34 crc kubenswrapper[4857]: I1128 13:39:34.842672 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwf6m\" (UniqueName: \"kubernetes.io/projected/8e187383-f03d-483d-bb3a-afe69f2a9d73-kube-api-access-xwf6m\") pod \"keystone-0c6f-account-create-update-r8x7m\" (UID: \"8e187383-f03d-483d-bb3a-afe69f2a9d73\") " pod="openstack/keystone-0c6f-account-create-update-r8x7m" Nov 28 13:39:34 crc kubenswrapper[4857]: I1128 13:39:34.844400 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/624fc019-ce85-46f4-b6b9-fc4e5f4fdfac-operator-scripts\") pod \"keystone-db-create-gw7vr\" (UID: \"624fc019-ce85-46f4-b6b9-fc4e5f4fdfac\") " pod="openstack/keystone-db-create-gw7vr" Nov 28 13:39:34 crc kubenswrapper[4857]: I1128 13:39:34.871581 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ndwn\" (UniqueName: \"kubernetes.io/projected/624fc019-ce85-46f4-b6b9-fc4e5f4fdfac-kube-api-access-9ndwn\") pod 
\"keystone-db-create-gw7vr\" (UID: \"624fc019-ce85-46f4-b6b9-fc4e5f4fdfac\") " pod="openstack/keystone-db-create-gw7vr" Nov 28 13:39:34 crc kubenswrapper[4857]: I1128 13:39:34.944638 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8e187383-f03d-483d-bb3a-afe69f2a9d73-operator-scripts\") pod \"keystone-0c6f-account-create-update-r8x7m\" (UID: \"8e187383-f03d-483d-bb3a-afe69f2a9d73\") " pod="openstack/keystone-0c6f-account-create-update-r8x7m" Nov 28 13:39:34 crc kubenswrapper[4857]: I1128 13:39:34.944714 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwf6m\" (UniqueName: \"kubernetes.io/projected/8e187383-f03d-483d-bb3a-afe69f2a9d73-kube-api-access-xwf6m\") pod \"keystone-0c6f-account-create-update-r8x7m\" (UID: \"8e187383-f03d-483d-bb3a-afe69f2a9d73\") " pod="openstack/keystone-0c6f-account-create-update-r8x7m" Nov 28 13:39:34 crc kubenswrapper[4857]: I1128 13:39:34.945707 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8e187383-f03d-483d-bb3a-afe69f2a9d73-operator-scripts\") pod \"keystone-0c6f-account-create-update-r8x7m\" (UID: \"8e187383-f03d-483d-bb3a-afe69f2a9d73\") " pod="openstack/keystone-0c6f-account-create-update-r8x7m" Nov 28 13:39:34 crc kubenswrapper[4857]: I1128 13:39:34.954967 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-gw7vr" Nov 28 13:39:34 crc kubenswrapper[4857]: I1128 13:39:34.965292 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwf6m\" (UniqueName: \"kubernetes.io/projected/8e187383-f03d-483d-bb3a-afe69f2a9d73-kube-api-access-xwf6m\") pod \"keystone-0c6f-account-create-update-r8x7m\" (UID: \"8e187383-f03d-483d-bb3a-afe69f2a9d73\") " pod="openstack/keystone-0c6f-account-create-update-r8x7m" Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.065946 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-0c6f-account-create-update-r8x7m" Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.257749 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-eef9-account-create-update-kg8k5" Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.273293 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-r57mh" Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.351670 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sqzkd\" (UniqueName: \"kubernetes.io/projected/96945c0e-06fd-4880-9fc5-b1be1e15474d-kube-api-access-sqzkd\") pod \"96945c0e-06fd-4880-9fc5-b1be1e15474d\" (UID: \"96945c0e-06fd-4880-9fc5-b1be1e15474d\") " Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.352942 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6da2e15b-2a40-4bf6-8502-5ac68921b525-operator-scripts\") pod \"6da2e15b-2a40-4bf6-8502-5ac68921b525\" (UID: \"6da2e15b-2a40-4bf6-8502-5ac68921b525\") " Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.353012 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/96945c0e-06fd-4880-9fc5-b1be1e15474d-operator-scripts\") pod \"96945c0e-06fd-4880-9fc5-b1be1e15474d\" (UID: \"96945c0e-06fd-4880-9fc5-b1be1e15474d\") " Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.353096 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5tmm7\" (UniqueName: \"kubernetes.io/projected/6da2e15b-2a40-4bf6-8502-5ac68921b525-kube-api-access-5tmm7\") pod \"6da2e15b-2a40-4bf6-8502-5ac68921b525\" (UID: \"6da2e15b-2a40-4bf6-8502-5ac68921b525\") " Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.353919 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6da2e15b-2a40-4bf6-8502-5ac68921b525-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6da2e15b-2a40-4bf6-8502-5ac68921b525" (UID: "6da2e15b-2a40-4bf6-8502-5ac68921b525"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.354006 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96945c0e-06fd-4880-9fc5-b1be1e15474d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "96945c0e-06fd-4880-9fc5-b1be1e15474d" (UID: "96945c0e-06fd-4880-9fc5-b1be1e15474d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.358560 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96945c0e-06fd-4880-9fc5-b1be1e15474d-kube-api-access-sqzkd" (OuterVolumeSpecName: "kube-api-access-sqzkd") pod "96945c0e-06fd-4880-9fc5-b1be1e15474d" (UID: "96945c0e-06fd-4880-9fc5-b1be1e15474d"). InnerVolumeSpecName "kube-api-access-sqzkd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.359122 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6da2e15b-2a40-4bf6-8502-5ac68921b525-kube-api-access-5tmm7" (OuterVolumeSpecName: "kube-api-access-5tmm7") pod "6da2e15b-2a40-4bf6-8502-5ac68921b525" (UID: "6da2e15b-2a40-4bf6-8502-5ac68921b525"). InnerVolumeSpecName "kube-api-access-5tmm7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.455931 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/96945c0e-06fd-4880-9fc5-b1be1e15474d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.456261 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5tmm7\" (UniqueName: \"kubernetes.io/projected/6da2e15b-2a40-4bf6-8502-5ac68921b525-kube-api-access-5tmm7\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.456272 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sqzkd\" (UniqueName: \"kubernetes.io/projected/96945c0e-06fd-4880-9fc5-b1be1e15474d-kube-api-access-sqzkd\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.456281 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6da2e15b-2a40-4bf6-8502-5ac68921b525-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.631973 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-0c6f-account-create-update-r8x7m"] Nov 28 13:39:35 crc kubenswrapper[4857]: W1128 13:39:35.636233 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8e187383_f03d_483d_bb3a_afe69f2a9d73.slice/crio-76cefa767c00bd94edd3d4794ea127e267a9c95fc9e9e4a3c2fb3698b9989cd3 WatchSource:0}: Error finding container 76cefa767c00bd94edd3d4794ea127e267a9c95fc9e9e4a3c2fb3698b9989cd3: Status 404 returned error can't find the container with id 76cefa767c00bd94edd3d4794ea127e267a9c95fc9e9e4a3c2fb3698b9989cd3 Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.644400 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-gw7vr"] Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.761156 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-etc-swift\") pod \"swift-storage-0\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " pod="openstack/swift-storage-0" Nov 28 13:39:35 crc kubenswrapper[4857]: E1128 13:39:35.761340 4857 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 13:39:35 crc kubenswrapper[4857]: E1128 13:39:35.761358 4857 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 13:39:35 crc kubenswrapper[4857]: E1128 13:39:35.761407 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-etc-swift podName:d5b28d1e-e702-4528-9964-72ad176a20b3 nodeName:}" failed. No retries permitted until 2025-11-28 13:39:43.761391223 +0000 UTC m=+1275.788766390 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-etc-swift") pod "swift-storage-0" (UID: "d5b28d1e-e702-4528-9964-72ad176a20b3") : configmap "swift-ring-files" not found Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.871909 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-eef9-account-create-update-kg8k5" Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.877078 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-eef9-account-create-update-kg8k5" event={"ID":"96945c0e-06fd-4880-9fc5-b1be1e15474d","Type":"ContainerDied","Data":"32fae87a305073093c32227db239b5f6e40126ebf57921cc0d8941c916f8972b"} Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.877143 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="32fae87a305073093c32227db239b5f6e40126ebf57921cc0d8941c916f8972b" Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.878908 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-xhgkc" event={"ID":"b208ca3d-5127-4e9d-ba17-a68dc507f085","Type":"ContainerStarted","Data":"9571dde5e18b53bcb326ad442ce10db79198107d0140d43bcf9dc5435a836278"} Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.884744 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-0c6f-account-create-update-r8x7m" event={"ID":"8e187383-f03d-483d-bb3a-afe69f2a9d73","Type":"ContainerStarted","Data":"76cefa767c00bd94edd3d4794ea127e267a9c95fc9e9e4a3c2fb3698b9989cd3"} Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.898409 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-gw7vr" event={"ID":"624fc019-ce85-46f4-b6b9-fc4e5f4fdfac","Type":"ContainerStarted","Data":"1cd82a2dfb0b12ef63f95362c68122a93ab3fa9b8a916d8a53919f1f63fea50c"} Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.908226 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-r57mh" event={"ID":"6da2e15b-2a40-4bf6-8502-5ac68921b525","Type":"ContainerDied","Data":"bed6f6494728e63d67042639cf0804663c1f18feccab03236e4c3937145fb828"} Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.908270 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-r57mh" Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.908573 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bed6f6494728e63d67042639cf0804663c1f18feccab03236e4c3937145fb828" Nov 28 13:39:35 crc kubenswrapper[4857]: I1128 13:39:35.915089 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-xhgkc" podStartSLOduration=2.011707577 podStartE2EDuration="7.915062711s" podCreationTimestamp="2025-11-28 13:39:28 +0000 UTC" firstStartedPulling="2025-11-28 13:39:29.256038616 +0000 UTC m=+1261.283413783" lastFinishedPulling="2025-11-28 13:39:35.15939375 +0000 UTC m=+1267.186768917" observedRunningTime="2025-11-28 13:39:35.905868675 +0000 UTC m=+1267.933243852" watchObservedRunningTime="2025-11-28 13:39:35.915062711 +0000 UTC m=+1267.942437888" Nov 28 13:39:36 crc kubenswrapper[4857]: I1128 13:39:36.924615 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 28 13:39:37 crc kubenswrapper[4857]: I1128 13:39:37.179896 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk" Nov 28 13:39:37 crc kubenswrapper[4857]: I1128 13:39:37.245680 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-m8rq9"] Nov 28 13:39:37 crc kubenswrapper[4857]: I1128 13:39:37.245938 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8554648995-m8rq9" podUID="eb8b0164-6add-4d4d-81d6-2a9e5814bbc4" containerName="dnsmasq-dns" containerID="cri-o://f5da92f08802942713f8bc36dd148ab39e7986210811f457a325c152ded87c9b" gracePeriod=10 Nov 28 13:39:37 crc kubenswrapper[4857]: I1128 13:39:37.938139 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-0c6f-account-create-update-r8x7m" event={"ID":"8e187383-f03d-483d-bb3a-afe69f2a9d73","Type":"ContainerStarted","Data":"5351abf72165635270eecdbf0fccb9f428f502d3b3b5b6535e65aa42cf4be817"} Nov 28 13:39:37 crc kubenswrapper[4857]: I1128 13:39:37.943252 4857 generic.go:334] "Generic (PLEG): container finished" podID="624fc019-ce85-46f4-b6b9-fc4e5f4fdfac" containerID="c9fbfe8853f76db26be85fbc07e14c1743b9d9e65d61b36a44483686c3031648" exitCode=0 Nov 28 13:39:37 crc kubenswrapper[4857]: I1128 13:39:37.943379 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-gw7vr" event={"ID":"624fc019-ce85-46f4-b6b9-fc4e5f4fdfac","Type":"ContainerDied","Data":"c9fbfe8853f76db26be85fbc07e14c1743b9d9e65d61b36a44483686c3031648"} Nov 28 13:39:37 crc kubenswrapper[4857]: I1128 13:39:37.955245 4857 generic.go:334] "Generic (PLEG): container finished" podID="eb8b0164-6add-4d4d-81d6-2a9e5814bbc4" containerID="f5da92f08802942713f8bc36dd148ab39e7986210811f457a325c152ded87c9b" exitCode=0 Nov 28 13:39:37 crc kubenswrapper[4857]: I1128 13:39:37.955321 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-m8rq9" event={"ID":"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4","Type":"ContainerDied","Data":"f5da92f08802942713f8bc36dd148ab39e7986210811f457a325c152ded87c9b"} Nov 28 13:39:37 crc kubenswrapper[4857]: I1128 13:39:37.965357 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-0c6f-account-create-update-r8x7m" podStartSLOduration=3.965331329 podStartE2EDuration="3.965331329s" podCreationTimestamp="2025-11-28 13:39:34 +0000 
UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:39:37.957162973 +0000 UTC m=+1269.984538140" watchObservedRunningTime="2025-11-28 13:39:37.965331329 +0000 UTC m=+1269.992706496" Nov 28 13:39:38 crc kubenswrapper[4857]: I1128 13:39:38.238986 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-m8rq9" Nov 28 13:39:38 crc kubenswrapper[4857]: I1128 13:39:38.411323 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-config\") pod \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\" (UID: \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\") " Nov 28 13:39:38 crc kubenswrapper[4857]: I1128 13:39:38.411371 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-ovsdbserver-sb\") pod \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\" (UID: \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\") " Nov 28 13:39:38 crc kubenswrapper[4857]: I1128 13:39:38.411405 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-ovsdbserver-nb\") pod \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\" (UID: \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\") " Nov 28 13:39:38 crc kubenswrapper[4857]: I1128 13:39:38.411433 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9fzmj\" (UniqueName: \"kubernetes.io/projected/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-kube-api-access-9fzmj\") pod \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\" (UID: \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\") " Nov 28 13:39:38 crc kubenswrapper[4857]: I1128 13:39:38.411613 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-dns-svc\") pod \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\" (UID: \"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4\") " Nov 28 13:39:38 crc kubenswrapper[4857]: I1128 13:39:38.418999 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-kube-api-access-9fzmj" (OuterVolumeSpecName: "kube-api-access-9fzmj") pod "eb8b0164-6add-4d4d-81d6-2a9e5814bbc4" (UID: "eb8b0164-6add-4d4d-81d6-2a9e5814bbc4"). InnerVolumeSpecName "kube-api-access-9fzmj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:39:38 crc kubenswrapper[4857]: I1128 13:39:38.458618 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "eb8b0164-6add-4d4d-81d6-2a9e5814bbc4" (UID: "eb8b0164-6add-4d4d-81d6-2a9e5814bbc4"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:39:38 crc kubenswrapper[4857]: I1128 13:39:38.465638 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "eb8b0164-6add-4d4d-81d6-2a9e5814bbc4" (UID: "eb8b0164-6add-4d4d-81d6-2a9e5814bbc4"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:39:38 crc kubenswrapper[4857]: I1128 13:39:38.465731 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "eb8b0164-6add-4d4d-81d6-2a9e5814bbc4" (UID: "eb8b0164-6add-4d4d-81d6-2a9e5814bbc4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:39:38 crc kubenswrapper[4857]: I1128 13:39:38.469134 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-config" (OuterVolumeSpecName: "config") pod "eb8b0164-6add-4d4d-81d6-2a9e5814bbc4" (UID: "eb8b0164-6add-4d4d-81d6-2a9e5814bbc4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:39:38 crc kubenswrapper[4857]: I1128 13:39:38.514152 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:38 crc kubenswrapper[4857]: I1128 13:39:38.514187 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:38 crc kubenswrapper[4857]: I1128 13:39:38.514197 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:38 crc kubenswrapper[4857]: I1128 13:39:38.514207 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:38 crc kubenswrapper[4857]: I1128 13:39:38.514218 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9fzmj\" (UniqueName: \"kubernetes.io/projected/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4-kube-api-access-9fzmj\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:38 crc kubenswrapper[4857]: I1128 13:39:38.971562 4857 generic.go:334] "Generic (PLEG): container finished" podID="8e187383-f03d-483d-bb3a-afe69f2a9d73" containerID="5351abf72165635270eecdbf0fccb9f428f502d3b3b5b6535e65aa42cf4be817" exitCode=0 Nov 28 13:39:38 crc kubenswrapper[4857]: I1128 13:39:38.971610 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-0c6f-account-create-update-r8x7m" event={"ID":"8e187383-f03d-483d-bb3a-afe69f2a9d73","Type":"ContainerDied","Data":"5351abf72165635270eecdbf0fccb9f428f502d3b3b5b6535e65aa42cf4be817"} Nov 28 13:39:38 crc kubenswrapper[4857]: I1128 13:39:38.976674 4857 generic.go:334] "Generic (PLEG): container finished" podID="71cc1f00-1a63-428e-8f12-2136ab077860" containerID="da003013615b1f7d03fb067beb76ca6840f95de6e79bbeb6ebc074ff574b4949" exitCode=0 Nov 28 13:39:38 crc kubenswrapper[4857]: I1128 13:39:38.976747 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"71cc1f00-1a63-428e-8f12-2136ab077860","Type":"ContainerDied","Data":"da003013615b1f7d03fb067beb76ca6840f95de6e79bbeb6ebc074ff574b4949"} Nov 28 13:39:38 crc kubenswrapper[4857]: I1128 13:39:38.982056 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-m8rq9" Nov 28 13:39:38 crc kubenswrapper[4857]: I1128 13:39:38.982597 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-m8rq9" event={"ID":"eb8b0164-6add-4d4d-81d6-2a9e5814bbc4","Type":"ContainerDied","Data":"17608b859ba8f5bf5b77c14be1cac8000c8e465619db7f2829c25aa8ebf3b6b1"} Nov 28 13:39:38 crc kubenswrapper[4857]: I1128 13:39:38.982823 4857 scope.go:117] "RemoveContainer" containerID="f5da92f08802942713f8bc36dd148ab39e7986210811f457a325c152ded87c9b" Nov 28 13:39:39 crc kubenswrapper[4857]: I1128 13:39:39.120776 4857 scope.go:117] "RemoveContainer" containerID="ebb0c77c3a132b40ddb14f7116fa5c94fbff77ab6d399bfdfdd415f39ad76508" Nov 28 13:39:39 crc kubenswrapper[4857]: I1128 13:39:39.134284 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-m8rq9"] Nov 28 13:39:39 crc kubenswrapper[4857]: I1128 13:39:39.146544 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-m8rq9"] Nov 28 13:39:39 crc kubenswrapper[4857]: I1128 13:39:39.291500 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-gw7vr" Nov 28 13:39:39 crc kubenswrapper[4857]: I1128 13:39:39.435291 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9ndwn\" (UniqueName: \"kubernetes.io/projected/624fc019-ce85-46f4-b6b9-fc4e5f4fdfac-kube-api-access-9ndwn\") pod \"624fc019-ce85-46f4-b6b9-fc4e5f4fdfac\" (UID: \"624fc019-ce85-46f4-b6b9-fc4e5f4fdfac\") " Nov 28 13:39:39 crc kubenswrapper[4857]: I1128 13:39:39.435449 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/624fc019-ce85-46f4-b6b9-fc4e5f4fdfac-operator-scripts\") pod \"624fc019-ce85-46f4-b6b9-fc4e5f4fdfac\" (UID: \"624fc019-ce85-46f4-b6b9-fc4e5f4fdfac\") " Nov 28 13:39:39 crc kubenswrapper[4857]: I1128 13:39:39.435920 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/624fc019-ce85-46f4-b6b9-fc4e5f4fdfac-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "624fc019-ce85-46f4-b6b9-fc4e5f4fdfac" (UID: "624fc019-ce85-46f4-b6b9-fc4e5f4fdfac"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:39:39 crc kubenswrapper[4857]: I1128 13:39:39.439718 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/624fc019-ce85-46f4-b6b9-fc4e5f4fdfac-kube-api-access-9ndwn" (OuterVolumeSpecName: "kube-api-access-9ndwn") pod "624fc019-ce85-46f4-b6b9-fc4e5f4fdfac" (UID: "624fc019-ce85-46f4-b6b9-fc4e5f4fdfac"). InnerVolumeSpecName "kube-api-access-9ndwn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:39:39 crc kubenswrapper[4857]: I1128 13:39:39.539187 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/624fc019-ce85-46f4-b6b9-fc4e5f4fdfac-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:39 crc kubenswrapper[4857]: I1128 13:39:39.539225 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9ndwn\" (UniqueName: \"kubernetes.io/projected/624fc019-ce85-46f4-b6b9-fc4e5f4fdfac-kube-api-access-9ndwn\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:39 crc kubenswrapper[4857]: I1128 13:39:39.994044 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"71cc1f00-1a63-428e-8f12-2136ab077860","Type":"ContainerStarted","Data":"72d325a6ac77417281a4f0e4c5deaeb2d676cf4b75f4ac8be5b905a3b744677c"} Nov 28 13:39:39 crc kubenswrapper[4857]: I1128 13:39:39.994324 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:39:39 crc kubenswrapper[4857]: I1128 13:39:39.995670 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-gw7vr" event={"ID":"624fc019-ce85-46f4-b6b9-fc4e5f4fdfac","Type":"ContainerDied","Data":"1cd82a2dfb0b12ef63f95362c68122a93ab3fa9b8a916d8a53919f1f63fea50c"} Nov 28 13:39:39 crc kubenswrapper[4857]: I1128 13:39:39.995726 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1cd82a2dfb0b12ef63f95362c68122a93ab3fa9b8a916d8a53919f1f63fea50c" Nov 28 13:39:39 crc kubenswrapper[4857]: I1128 13:39:39.995794 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-gw7vr" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.015633 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=38.7959607 podStartE2EDuration="1m10.015598989s" podCreationTimestamp="2025-11-28 13:38:30 +0000 UTC" firstStartedPulling="2025-11-28 13:38:33.127826798 +0000 UTC m=+1205.155201965" lastFinishedPulling="2025-11-28 13:39:04.347465087 +0000 UTC m=+1236.374840254" observedRunningTime="2025-11-28 13:39:40.013828098 +0000 UTC m=+1272.041203275" watchObservedRunningTime="2025-11-28 13:39:40.015598989 +0000 UTC m=+1272.042974156" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.319798 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-0c6f-account-create-update-r8x7m" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.321303 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb8b0164-6add-4d4d-81d6-2a9e5814bbc4" path="/var/lib/kubelet/pods/eb8b0164-6add-4d4d-81d6-2a9e5814bbc4/volumes" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.454008 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8e187383-f03d-483d-bb3a-afe69f2a9d73-operator-scripts\") pod \"8e187383-f03d-483d-bb3a-afe69f2a9d73\" (UID: \"8e187383-f03d-483d-bb3a-afe69f2a9d73\") " Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.454139 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xwf6m\" (UniqueName: \"kubernetes.io/projected/8e187383-f03d-483d-bb3a-afe69f2a9d73-kube-api-access-xwf6m\") pod \"8e187383-f03d-483d-bb3a-afe69f2a9d73\" (UID: \"8e187383-f03d-483d-bb3a-afe69f2a9d73\") " Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.456028 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e187383-f03d-483d-bb3a-afe69f2a9d73-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8e187383-f03d-483d-bb3a-afe69f2a9d73" (UID: "8e187383-f03d-483d-bb3a-afe69f2a9d73"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.457628 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e187383-f03d-483d-bb3a-afe69f2a9d73-kube-api-access-xwf6m" (OuterVolumeSpecName: "kube-api-access-xwf6m") pod "8e187383-f03d-483d-bb3a-afe69f2a9d73" (UID: "8e187383-f03d-483d-bb3a-afe69f2a9d73"). InnerVolumeSpecName "kube-api-access-xwf6m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.555954 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8e187383-f03d-483d-bb3a-afe69f2a9d73-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.555997 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xwf6m\" (UniqueName: \"kubernetes.io/projected/8e187383-f03d-483d-bb3a-afe69f2a9d73-kube-api-access-xwf6m\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.570968 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.578442 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-dwjz2"] Nov 28 13:39:40 crc kubenswrapper[4857]: E1128 13:39:40.578825 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96945c0e-06fd-4880-9fc5-b1be1e15474d" containerName="mariadb-account-create-update" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.578840 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="96945c0e-06fd-4880-9fc5-b1be1e15474d" containerName="mariadb-account-create-update" Nov 28 13:39:40 crc kubenswrapper[4857]: E1128 13:39:40.578852 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e187383-f03d-483d-bb3a-afe69f2a9d73" containerName="mariadb-account-create-update" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.578860 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e187383-f03d-483d-bb3a-afe69f2a9d73" containerName="mariadb-account-create-update" Nov 28 13:39:40 crc kubenswrapper[4857]: E1128 13:39:40.578875 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb8b0164-6add-4d4d-81d6-2a9e5814bbc4" containerName="init" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.578883 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb8b0164-6add-4d4d-81d6-2a9e5814bbc4" containerName="init" Nov 28 13:39:40 crc kubenswrapper[4857]: E1128 13:39:40.578898 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb8b0164-6add-4d4d-81d6-2a9e5814bbc4" containerName="dnsmasq-dns" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.578906 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb8b0164-6add-4d4d-81d6-2a9e5814bbc4" containerName="dnsmasq-dns" Nov 28 13:39:40 crc kubenswrapper[4857]: E1128 13:39:40.578916 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6da2e15b-2a40-4bf6-8502-5ac68921b525" containerName="mariadb-database-create" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.578924 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6da2e15b-2a40-4bf6-8502-5ac68921b525" containerName="mariadb-database-create" Nov 28 13:39:40 crc kubenswrapper[4857]: E1128 13:39:40.578941 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="624fc019-ce85-46f4-b6b9-fc4e5f4fdfac" containerName="mariadb-database-create" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.578949 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="624fc019-ce85-46f4-b6b9-fc4e5f4fdfac" containerName="mariadb-database-create" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.579132 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e187383-f03d-483d-bb3a-afe69f2a9d73" 
containerName="mariadb-account-create-update" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.579151 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="6da2e15b-2a40-4bf6-8502-5ac68921b525" containerName="mariadb-database-create" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.579168 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="96945c0e-06fd-4880-9fc5-b1be1e15474d" containerName="mariadb-account-create-update" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.579185 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="624fc019-ce85-46f4-b6b9-fc4e5f4fdfac" containerName="mariadb-database-create" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.579199 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb8b0164-6add-4d4d-81d6-2a9e5814bbc4" containerName="dnsmasq-dns" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.579807 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-dwjz2" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.582540 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.585488 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-krs5d" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.590864 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-dwjz2"] Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.657393 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/574e8323-bfa6-4c1d-9a87-53f09671c900-config-data\") pod \"glance-db-sync-dwjz2\" (UID: \"574e8323-bfa6-4c1d-9a87-53f09671c900\") " pod="openstack/glance-db-sync-dwjz2" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.657448 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/574e8323-bfa6-4c1d-9a87-53f09671c900-combined-ca-bundle\") pod \"glance-db-sync-dwjz2\" (UID: \"574e8323-bfa6-4c1d-9a87-53f09671c900\") " pod="openstack/glance-db-sync-dwjz2" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.657594 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cz49d\" (UniqueName: \"kubernetes.io/projected/574e8323-bfa6-4c1d-9a87-53f09671c900-kube-api-access-cz49d\") pod \"glance-db-sync-dwjz2\" (UID: \"574e8323-bfa6-4c1d-9a87-53f09671c900\") " pod="openstack/glance-db-sync-dwjz2" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.657634 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/574e8323-bfa6-4c1d-9a87-53f09671c900-db-sync-config-data\") pod \"glance-db-sync-dwjz2\" (UID: \"574e8323-bfa6-4c1d-9a87-53f09671c900\") " pod="openstack/glance-db-sync-dwjz2" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.759095 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/574e8323-bfa6-4c1d-9a87-53f09671c900-combined-ca-bundle\") pod \"glance-db-sync-dwjz2\" (UID: \"574e8323-bfa6-4c1d-9a87-53f09671c900\") " pod="openstack/glance-db-sync-dwjz2" Nov 28 13:39:40 crc 
kubenswrapper[4857]: I1128 13:39:40.759154 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cz49d\" (UniqueName: \"kubernetes.io/projected/574e8323-bfa6-4c1d-9a87-53f09671c900-kube-api-access-cz49d\") pod \"glance-db-sync-dwjz2\" (UID: \"574e8323-bfa6-4c1d-9a87-53f09671c900\") " pod="openstack/glance-db-sync-dwjz2" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.759171 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/574e8323-bfa6-4c1d-9a87-53f09671c900-db-sync-config-data\") pod \"glance-db-sync-dwjz2\" (UID: \"574e8323-bfa6-4c1d-9a87-53f09671c900\") " pod="openstack/glance-db-sync-dwjz2" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.759308 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/574e8323-bfa6-4c1d-9a87-53f09671c900-config-data\") pod \"glance-db-sync-dwjz2\" (UID: \"574e8323-bfa6-4c1d-9a87-53f09671c900\") " pod="openstack/glance-db-sync-dwjz2" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.764988 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/574e8323-bfa6-4c1d-9a87-53f09671c900-db-sync-config-data\") pod \"glance-db-sync-dwjz2\" (UID: \"574e8323-bfa6-4c1d-9a87-53f09671c900\") " pod="openstack/glance-db-sync-dwjz2" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.765457 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/574e8323-bfa6-4c1d-9a87-53f09671c900-config-data\") pod \"glance-db-sync-dwjz2\" (UID: \"574e8323-bfa6-4c1d-9a87-53f09671c900\") " pod="openstack/glance-db-sync-dwjz2" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.773538 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/574e8323-bfa6-4c1d-9a87-53f09671c900-combined-ca-bundle\") pod \"glance-db-sync-dwjz2\" (UID: \"574e8323-bfa6-4c1d-9a87-53f09671c900\") " pod="openstack/glance-db-sync-dwjz2" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.778150 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cz49d\" (UniqueName: \"kubernetes.io/projected/574e8323-bfa6-4c1d-9a87-53f09671c900-kube-api-access-cz49d\") pod \"glance-db-sync-dwjz2\" (UID: \"574e8323-bfa6-4c1d-9a87-53f09671c900\") " pod="openstack/glance-db-sync-dwjz2" Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.805949 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-t99ql" podUID="b1f7e362-6e6b-4636-b551-4533ad037811" containerName="ovn-controller" probeResult="failure" output=< Nov 28 13:39:40 crc kubenswrapper[4857]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 28 13:39:40 crc kubenswrapper[4857]: > Nov 28 13:39:40 crc kubenswrapper[4857]: I1128 13:39:40.902960 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-dwjz2" Nov 28 13:39:41 crc kubenswrapper[4857]: I1128 13:39:41.013584 4857 util.go:48] "No ready sandbox for pod can be found. 
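
The "Probe failed" entries for openstack/ovn-controller-t99ql capture the stderr of what appears to be an exec readiness probe: a script queries the controller's connection status and fails until it reports 'connected'. A rough Go equivalent of such a check, assuming a stand-in ovn-status command (the real probe script and the OVN utility it shells out to live in the ovn-controller image and are not shown in this log):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	// Hypothetical status command; stands in for whatever the probe
	// script actually invokes to read the controller's connection state.
	out, err := exec.Command("ovn-status").CombinedOutput()
	if err != nil {
		fmt.Fprintf(os.Stderr, "ERROR - could not query ovn-controller: %v\n", err)
		os.Exit(1)
	}
	status := strings.TrimSpace(string(out))
	if status != "connected" {
		// Mirrors the message captured in the probeResult output above.
		fmt.Fprintf(os.Stderr,
			"ERROR - ovn-controller connection status is '%s', expecting 'connected' status\n", status)
		os.Exit(1) // non-zero exit marks the probe as failed
	}
}

A non-zero exit code is all the kubelet acts on; the captured stderr is what surfaces as the probeResult output in the entries above.
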
Need to start a new one" pod="openstack/keystone-0c6f-account-create-update-r8x7m" Nov 28 13:39:41 crc kubenswrapper[4857]: I1128 13:39:41.013579 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-0c6f-account-create-update-r8x7m" event={"ID":"8e187383-f03d-483d-bb3a-afe69f2a9d73","Type":"ContainerDied","Data":"76cefa767c00bd94edd3d4794ea127e267a9c95fc9e9e4a3c2fb3698b9989cd3"} Nov 28 13:39:41 crc kubenswrapper[4857]: I1128 13:39:41.013640 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="76cefa767c00bd94edd3d4794ea127e267a9c95fc9e9e4a3c2fb3698b9989cd3" Nov 28 13:39:41 crc kubenswrapper[4857]: I1128 13:39:41.489660 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-dwjz2"] Nov 28 13:39:42 crc kubenswrapper[4857]: I1128 13:39:42.023285 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-dwjz2" event={"ID":"574e8323-bfa6-4c1d-9a87-53f09671c900","Type":"ContainerStarted","Data":"b0af93f55527451ef3d1dda53632cf34351b46d9c9f5cb9421a1b101899f911b"} Nov 28 13:39:43 crc kubenswrapper[4857]: I1128 13:39:43.825947 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-etc-swift\") pod \"swift-storage-0\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " pod="openstack/swift-storage-0" Nov 28 13:39:43 crc kubenswrapper[4857]: I1128 13:39:43.852664 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-etc-swift\") pod \"swift-storage-0\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " pod="openstack/swift-storage-0" Nov 28 13:39:43 crc kubenswrapper[4857]: I1128 13:39:43.905518 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 28 13:39:44 crc kubenswrapper[4857]: I1128 13:39:44.053511 4857 generic.go:334] "Generic (PLEG): container finished" podID="b208ca3d-5127-4e9d-ba17-a68dc507f085" containerID="9571dde5e18b53bcb326ad442ce10db79198107d0140d43bcf9dc5435a836278" exitCode=0 Nov 28 13:39:44 crc kubenswrapper[4857]: I1128 13:39:44.053556 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-xhgkc" event={"ID":"b208ca3d-5127-4e9d-ba17-a68dc507f085","Type":"ContainerDied","Data":"9571dde5e18b53bcb326ad442ce10db79198107d0140d43bcf9dc5435a836278"} Nov 28 13:39:44 crc kubenswrapper[4857]: I1128 13:39:44.483090 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 28 13:39:44 crc kubenswrapper[4857]: W1128 13:39:44.486539 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd5b28d1e_e702_4528_9964_72ad176a20b3.slice/crio-11aedcbefb28558d19f31a8c3d03b54f3dd09a623275daa4f4ea2749768593c6 WatchSource:0}: Error finding container 11aedcbefb28558d19f31a8c3d03b54f3dd09a623275daa4f4ea2749768593c6: Status 404 returned error can't find the container with id 11aedcbefb28558d19f31a8c3d03b54f3dd09a623275daa4f4ea2749768593c6 Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.062568 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerStarted","Data":"11aedcbefb28558d19f31a8c3d03b54f3dd09a623275daa4f4ea2749768593c6"} Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.383769 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-xhgkc" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.452400 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b208ca3d-5127-4e9d-ba17-a68dc507f085-combined-ca-bundle\") pod \"b208ca3d-5127-4e9d-ba17-a68dc507f085\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.452452 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b208ca3d-5127-4e9d-ba17-a68dc507f085-etc-swift\") pod \"b208ca3d-5127-4e9d-ba17-a68dc507f085\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.452518 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b208ca3d-5127-4e9d-ba17-a68dc507f085-scripts\") pod \"b208ca3d-5127-4e9d-ba17-a68dc507f085\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.452565 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b208ca3d-5127-4e9d-ba17-a68dc507f085-ring-data-devices\") pod \"b208ca3d-5127-4e9d-ba17-a68dc507f085\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.452655 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b208ca3d-5127-4e9d-ba17-a68dc507f085-dispersionconf\") pod \"b208ca3d-5127-4e9d-ba17-a68dc507f085\" (UID: 
\"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.452703 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b208ca3d-5127-4e9d-ba17-a68dc507f085-swiftconf\") pod \"b208ca3d-5127-4e9d-ba17-a68dc507f085\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.452741 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kdwx6\" (UniqueName: \"kubernetes.io/projected/b208ca3d-5127-4e9d-ba17-a68dc507f085-kube-api-access-kdwx6\") pod \"b208ca3d-5127-4e9d-ba17-a68dc507f085\" (UID: \"b208ca3d-5127-4e9d-ba17-a68dc507f085\") " Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.454589 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b208ca3d-5127-4e9d-ba17-a68dc507f085-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "b208ca3d-5127-4e9d-ba17-a68dc507f085" (UID: "b208ca3d-5127-4e9d-ba17-a68dc507f085"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.455130 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b208ca3d-5127-4e9d-ba17-a68dc507f085-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "b208ca3d-5127-4e9d-ba17-a68dc507f085" (UID: "b208ca3d-5127-4e9d-ba17-a68dc507f085"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.462995 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b208ca3d-5127-4e9d-ba17-a68dc507f085-kube-api-access-kdwx6" (OuterVolumeSpecName: "kube-api-access-kdwx6") pod "b208ca3d-5127-4e9d-ba17-a68dc507f085" (UID: "b208ca3d-5127-4e9d-ba17-a68dc507f085"). InnerVolumeSpecName "kube-api-access-kdwx6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.476389 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b208ca3d-5127-4e9d-ba17-a68dc507f085-scripts" (OuterVolumeSpecName: "scripts") pod "b208ca3d-5127-4e9d-ba17-a68dc507f085" (UID: "b208ca3d-5127-4e9d-ba17-a68dc507f085"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.481605 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b208ca3d-5127-4e9d-ba17-a68dc507f085-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "b208ca3d-5127-4e9d-ba17-a68dc507f085" (UID: "b208ca3d-5127-4e9d-ba17-a68dc507f085"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.498196 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b208ca3d-5127-4e9d-ba17-a68dc507f085-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b208ca3d-5127-4e9d-ba17-a68dc507f085" (UID: "b208ca3d-5127-4e9d-ba17-a68dc507f085"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.513929 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b208ca3d-5127-4e9d-ba17-a68dc507f085-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "b208ca3d-5127-4e9d-ba17-a68dc507f085" (UID: "b208ca3d-5127-4e9d-ba17-a68dc507f085"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.556323 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b208ca3d-5127-4e9d-ba17-a68dc507f085-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.556354 4857 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b208ca3d-5127-4e9d-ba17-a68dc507f085-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.556367 4857 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b208ca3d-5127-4e9d-ba17-a68dc507f085-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.556377 4857 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b208ca3d-5127-4e9d-ba17-a68dc507f085-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.556386 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kdwx6\" (UniqueName: \"kubernetes.io/projected/b208ca3d-5127-4e9d-ba17-a68dc507f085-kube-api-access-kdwx6\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.556394 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b208ca3d-5127-4e9d-ba17-a68dc507f085-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.556401 4857 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b208ca3d-5127-4e9d-ba17-a68dc507f085-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.569093 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.789785 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-t99ql-config-2w9j7"] Nov 28 13:39:45 crc kubenswrapper[4857]: E1128 13:39:45.790892 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b208ca3d-5127-4e9d-ba17-a68dc507f085" containerName="swift-ring-rebalance" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.790913 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="b208ca3d-5127-4e9d-ba17-a68dc507f085" containerName="swift-ring-rebalance" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.791114 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="b208ca3d-5127-4e9d-ba17-a68dc507f085" containerName="swift-ring-rebalance" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.791679 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-t99ql-config-2w9j7" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.795976 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.803231 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-t99ql-config-2w9j7"] Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.809735 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-t99ql" podUID="b1f7e362-6e6b-4636-b551-4533ad037811" containerName="ovn-controller" probeResult="failure" output=< Nov 28 13:39:45 crc kubenswrapper[4857]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 28 13:39:45 crc kubenswrapper[4857]: > Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.863596 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/42581b3f-cc25-4477-883f-140d470b8f1e-var-run-ovn\") pod \"ovn-controller-t99ql-config-2w9j7\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " pod="openstack/ovn-controller-t99ql-config-2w9j7" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.863693 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/42581b3f-cc25-4477-883f-140d470b8f1e-additional-scripts\") pod \"ovn-controller-t99ql-config-2w9j7\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " pod="openstack/ovn-controller-t99ql-config-2w9j7" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.863740 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/42581b3f-cc25-4477-883f-140d470b8f1e-var-run\") pod \"ovn-controller-t99ql-config-2w9j7\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " pod="openstack/ovn-controller-t99ql-config-2w9j7" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.863900 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mwkm\" (UniqueName: \"kubernetes.io/projected/42581b3f-cc25-4477-883f-140d470b8f1e-kube-api-access-7mwkm\") pod \"ovn-controller-t99ql-config-2w9j7\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " pod="openstack/ovn-controller-t99ql-config-2w9j7" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.863974 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/42581b3f-cc25-4477-883f-140d470b8f1e-var-log-ovn\") pod \"ovn-controller-t99ql-config-2w9j7\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " pod="openstack/ovn-controller-t99ql-config-2w9j7" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.864004 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/42581b3f-cc25-4477-883f-140d470b8f1e-scripts\") pod \"ovn-controller-t99ql-config-2w9j7\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " pod="openstack/ovn-controller-t99ql-config-2w9j7" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.965511 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/42581b3f-cc25-4477-883f-140d470b8f1e-var-run-ovn\") pod \"ovn-controller-t99ql-config-2w9j7\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " pod="openstack/ovn-controller-t99ql-config-2w9j7" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.965645 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/42581b3f-cc25-4477-883f-140d470b8f1e-additional-scripts\") pod \"ovn-controller-t99ql-config-2w9j7\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " pod="openstack/ovn-controller-t99ql-config-2w9j7" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.965724 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/42581b3f-cc25-4477-883f-140d470b8f1e-var-run\") pod \"ovn-controller-t99ql-config-2w9j7\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " pod="openstack/ovn-controller-t99ql-config-2w9j7" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.965765 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/42581b3f-cc25-4477-883f-140d470b8f1e-var-run-ovn\") pod \"ovn-controller-t99ql-config-2w9j7\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " pod="openstack/ovn-controller-t99ql-config-2w9j7" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.966399 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/42581b3f-cc25-4477-883f-140d470b8f1e-additional-scripts\") pod \"ovn-controller-t99ql-config-2w9j7\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " pod="openstack/ovn-controller-t99ql-config-2w9j7" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.968265 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/42581b3f-cc25-4477-883f-140d470b8f1e-var-run\") pod \"ovn-controller-t99ql-config-2w9j7\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " pod="openstack/ovn-controller-t99ql-config-2w9j7" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.968288 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mwkm\" (UniqueName: \"kubernetes.io/projected/42581b3f-cc25-4477-883f-140d470b8f1e-kube-api-access-7mwkm\") pod \"ovn-controller-t99ql-config-2w9j7\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " pod="openstack/ovn-controller-t99ql-config-2w9j7" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.968381 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/42581b3f-cc25-4477-883f-140d470b8f1e-var-log-ovn\") pod \"ovn-controller-t99ql-config-2w9j7\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " pod="openstack/ovn-controller-t99ql-config-2w9j7" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.968418 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/42581b3f-cc25-4477-883f-140d470b8f1e-scripts\") pod \"ovn-controller-t99ql-config-2w9j7\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " pod="openstack/ovn-controller-t99ql-config-2w9j7" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.968616 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: 
\"kubernetes.io/host-path/42581b3f-cc25-4477-883f-140d470b8f1e-var-log-ovn\") pod \"ovn-controller-t99ql-config-2w9j7\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " pod="openstack/ovn-controller-t99ql-config-2w9j7" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.977487 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/42581b3f-cc25-4477-883f-140d470b8f1e-scripts\") pod \"ovn-controller-t99ql-config-2w9j7\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " pod="openstack/ovn-controller-t99ql-config-2w9j7" Nov 28 13:39:45 crc kubenswrapper[4857]: I1128 13:39:45.989427 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mwkm\" (UniqueName: \"kubernetes.io/projected/42581b3f-cc25-4477-883f-140d470b8f1e-kube-api-access-7mwkm\") pod \"ovn-controller-t99ql-config-2w9j7\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " pod="openstack/ovn-controller-t99ql-config-2w9j7" Nov 28 13:39:46 crc kubenswrapper[4857]: I1128 13:39:46.069833 4857 generic.go:334] "Generic (PLEG): container finished" podID="cfbd0457-d459-4bf2-bdaf-8b61db5cce65" containerID="0b4fdb93170b6f9968d2f6150fa31e0cec84ad5ec1c7df2d3bf7d8ff7467e6e7" exitCode=0 Nov 28 13:39:46 crc kubenswrapper[4857]: I1128 13:39:46.069908 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"cfbd0457-d459-4bf2-bdaf-8b61db5cce65","Type":"ContainerDied","Data":"0b4fdb93170b6f9968d2f6150fa31e0cec84ad5ec1c7df2d3bf7d8ff7467e6e7"} Nov 28 13:39:46 crc kubenswrapper[4857]: I1128 13:39:46.073695 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-xhgkc" event={"ID":"b208ca3d-5127-4e9d-ba17-a68dc507f085","Type":"ContainerDied","Data":"51e63bae5a9e13b73beb4bf98d146874e03ae9998a3b131b839483049f93aa5d"} Nov 28 13:39:46 crc kubenswrapper[4857]: I1128 13:39:46.073718 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="51e63bae5a9e13b73beb4bf98d146874e03ae9998a3b131b839483049f93aa5d" Nov 28 13:39:46 crc kubenswrapper[4857]: I1128 13:39:46.073773 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-xhgkc" Nov 28 13:39:46 crc kubenswrapper[4857]: I1128 13:39:46.114498 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-t99ql-config-2w9j7" Nov 28 13:39:46 crc kubenswrapper[4857]: I1128 13:39:46.593913 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-t99ql-config-2w9j7"] Nov 28 13:39:46 crc kubenswrapper[4857]: W1128 13:39:46.609515 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod42581b3f_cc25_4477_883f_140d470b8f1e.slice/crio-0ecaef163000422512fae4387767353d91e63d00f682a70c5593e19a9fcb4d6e WatchSource:0}: Error finding container 0ecaef163000422512fae4387767353d91e63d00f682a70c5593e19a9fcb4d6e: Status 404 returned error can't find the container with id 0ecaef163000422512fae4387767353d91e63d00f682a70c5593e19a9fcb4d6e Nov 28 13:39:47 crc kubenswrapper[4857]: I1128 13:39:47.087318 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-t99ql-config-2w9j7" event={"ID":"42581b3f-cc25-4477-883f-140d470b8f1e","Type":"ContainerStarted","Data":"08bd3edd8a1d8b0073c2499c1dbe3a904476b92917461d12cbf12f279796f8e7"} Nov 28 13:39:47 crc kubenswrapper[4857]: I1128 13:39:47.087846 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-t99ql-config-2w9j7" event={"ID":"42581b3f-cc25-4477-883f-140d470b8f1e","Type":"ContainerStarted","Data":"0ecaef163000422512fae4387767353d91e63d00f682a70c5593e19a9fcb4d6e"} Nov 28 13:39:47 crc kubenswrapper[4857]: I1128 13:39:47.092630 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"cfbd0457-d459-4bf2-bdaf-8b61db5cce65","Type":"ContainerStarted","Data":"b4dc40ec2aafb3b05e54fb73bbf1e3fb91135c9bbf7ec2c351e4ea6cea29e654"} Nov 28 13:39:47 crc kubenswrapper[4857]: I1128 13:39:47.093243 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 28 13:39:47 crc kubenswrapper[4857]: I1128 13:39:47.102264 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerStarted","Data":"cba71b9ab843952cf7d72667e396c7374c1e7e44e8883fc3704df6fae16f5f38"} Nov 28 13:39:47 crc kubenswrapper[4857]: I1128 13:39:47.102364 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerStarted","Data":"c31f95cb1b9a065f105c67a07bd5d5b7cf66901a282cda1c3bec560e21d74414"} Nov 28 13:39:47 crc kubenswrapper[4857]: I1128 13:39:47.102383 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerStarted","Data":"470dd259d2efea986a51d98e27e81921a7309bef3934a73e5e73feb96d784778"} Nov 28 13:39:47 crc kubenswrapper[4857]: I1128 13:39:47.102414 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerStarted","Data":"43f3af2bcb6a92ec4e0c79358397d8a0e3515b9b8ec39a557f85c39ba849f2e2"} Nov 28 13:39:47 crc kubenswrapper[4857]: I1128 13:39:47.114638 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-t99ql-config-2w9j7" podStartSLOduration=2.11461244 podStartE2EDuration="2.11461244s" podCreationTimestamp="2025-11-28 13:39:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 
13:39:47.106237158 +0000 UTC m=+1279.133612585" watchObservedRunningTime="2025-11-28 13:39:47.11461244 +0000 UTC m=+1279.141987607" Nov 28 13:39:47 crc kubenswrapper[4857]: I1128 13:39:47.145221 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=-9223371959.709572 podStartE2EDuration="1m17.145204086s" podCreationTimestamp="2025-11-28 13:38:30 +0000 UTC" firstStartedPulling="2025-11-28 13:38:32.630082945 +0000 UTC m=+1204.657458122" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:39:47.143232919 +0000 UTC m=+1279.170608096" watchObservedRunningTime="2025-11-28 13:39:47.145204086 +0000 UTC m=+1279.172579253" Nov 28 13:39:48 crc kubenswrapper[4857]: I1128 13:39:48.111520 4857 generic.go:334] "Generic (PLEG): container finished" podID="42581b3f-cc25-4477-883f-140d470b8f1e" containerID="08bd3edd8a1d8b0073c2499c1dbe3a904476b92917461d12cbf12f279796f8e7" exitCode=0 Nov 28 13:39:48 crc kubenswrapper[4857]: I1128 13:39:48.112441 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-t99ql-config-2w9j7" event={"ID":"42581b3f-cc25-4477-883f-140d470b8f1e","Type":"ContainerDied","Data":"08bd3edd8a1d8b0073c2499c1dbe3a904476b92917461d12cbf12f279796f8e7"} Nov 28 13:39:50 crc kubenswrapper[4857]: I1128 13:39:50.813802 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-t99ql" Nov 28 13:39:52 crc kubenswrapper[4857]: I1128 13:39:52.440076 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:39:57 crc kubenswrapper[4857]: E1128 13:39:57.660881 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified" Nov 28 13:39:57 crc kubenswrapper[4857]: E1128 13:39:57.661451 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cz49d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-dwjz2_openstack(574e8323-bfa6-4c1d-9a87-53f09671c900): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:39:57 crc kubenswrapper[4857]: E1128 13:39:57.662666 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-dwjz2" podUID="574e8323-bfa6-4c1d-9a87-53f09671c900" Nov 28 13:39:57 crc kubenswrapper[4857]: E1128 13:39:57.867637 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-swift-container:current-podified" Nov 28 13:39:57 crc kubenswrapper[4857]: E1128 13:39:57.867880 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:container-server,Image:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,Command:[/usr/bin/swift-container-server /etc/swift/container-server.conf.d 
-v],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:container,HostPort:0,ContainerPort:6201,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5b7h56h9dh94h67bh697h95h55hbh555h556h675h5fdh57dh579h5fbh64fh5c9h687hb6h678h5d4h549h54h98h8ch564h5bh5bch55dhc8hf8q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:swift,ReadOnly:false,MountPath:/srv/node/pv,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:etc-swift,ReadOnly:false,MountPath:/etc/swift,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cache,ReadOnly:false,MountPath:/var/cache/swift,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:lock,ReadOnly:false,MountPath:/var/lock,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wn8tz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42445,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-storage-0_openstack(d5b28d1e-e702-4528-9964-72ad176a20b3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:39:57 crc kubenswrapper[4857]: I1128 13:39:57.949937 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-t99ql-config-2w9j7" Nov 28 13:39:58 crc kubenswrapper[4857]: I1128 13:39:58.090975 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/42581b3f-cc25-4477-883f-140d470b8f1e-var-run-ovn\") pod \"42581b3f-cc25-4477-883f-140d470b8f1e\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " Nov 28 13:39:58 crc kubenswrapper[4857]: I1128 13:39:58.091134 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/42581b3f-cc25-4477-883f-140d470b8f1e-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "42581b3f-cc25-4477-883f-140d470b8f1e" (UID: "42581b3f-cc25-4477-883f-140d470b8f1e"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:39:58 crc kubenswrapper[4857]: I1128 13:39:58.091276 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/42581b3f-cc25-4477-883f-140d470b8f1e-scripts\") pod \"42581b3f-cc25-4477-883f-140d470b8f1e\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " Nov 28 13:39:58 crc kubenswrapper[4857]: I1128 13:39:58.091438 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/42581b3f-cc25-4477-883f-140d470b8f1e-var-run\") pod \"42581b3f-cc25-4477-883f-140d470b8f1e\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " Nov 28 13:39:58 crc kubenswrapper[4857]: I1128 13:39:58.091554 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/42581b3f-cc25-4477-883f-140d470b8f1e-var-run" (OuterVolumeSpecName: "var-run") pod "42581b3f-cc25-4477-883f-140d470b8f1e" (UID: "42581b3f-cc25-4477-883f-140d470b8f1e"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:39:58 crc kubenswrapper[4857]: I1128 13:39:58.091559 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7mwkm\" (UniqueName: \"kubernetes.io/projected/42581b3f-cc25-4477-883f-140d470b8f1e-kube-api-access-7mwkm\") pod \"42581b3f-cc25-4477-883f-140d470b8f1e\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " Nov 28 13:39:58 crc kubenswrapper[4857]: I1128 13:39:58.091708 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/42581b3f-cc25-4477-883f-140d470b8f1e-additional-scripts\") pod \"42581b3f-cc25-4477-883f-140d470b8f1e\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " Nov 28 13:39:58 crc kubenswrapper[4857]: I1128 13:39:58.091776 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/42581b3f-cc25-4477-883f-140d470b8f1e-var-log-ovn\") pod \"42581b3f-cc25-4477-883f-140d470b8f1e\" (UID: \"42581b3f-cc25-4477-883f-140d470b8f1e\") " Nov 28 13:39:58 crc kubenswrapper[4857]: I1128 13:39:58.092048 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/42581b3f-cc25-4477-883f-140d470b8f1e-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "42581b3f-cc25-4477-883f-140d470b8f1e" (UID: "42581b3f-cc25-4477-883f-140d470b8f1e"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:39:58 crc kubenswrapper[4857]: I1128 13:39:58.092280 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42581b3f-cc25-4477-883f-140d470b8f1e-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "42581b3f-cc25-4477-883f-140d470b8f1e" (UID: "42581b3f-cc25-4477-883f-140d470b8f1e"). InnerVolumeSpecName "additional-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:39:58 crc kubenswrapper[4857]: I1128 13:39:58.092421 4857 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/42581b3f-cc25-4477-883f-140d470b8f1e-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:58 crc kubenswrapper[4857]: I1128 13:39:58.092439 4857 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/42581b3f-cc25-4477-883f-140d470b8f1e-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:58 crc kubenswrapper[4857]: I1128 13:39:58.092448 4857 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/42581b3f-cc25-4477-883f-140d470b8f1e-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:58 crc kubenswrapper[4857]: I1128 13:39:58.092457 4857 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/42581b3f-cc25-4477-883f-140d470b8f1e-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:58 crc kubenswrapper[4857]: I1128 13:39:58.092547 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42581b3f-cc25-4477-883f-140d470b8f1e-scripts" (OuterVolumeSpecName: "scripts") pod "42581b3f-cc25-4477-883f-140d470b8f1e" (UID: "42581b3f-cc25-4477-883f-140d470b8f1e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:39:58 crc kubenswrapper[4857]: I1128 13:39:58.096920 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42581b3f-cc25-4477-883f-140d470b8f1e-kube-api-access-7mwkm" (OuterVolumeSpecName: "kube-api-access-7mwkm") pod "42581b3f-cc25-4477-883f-140d470b8f1e" (UID: "42581b3f-cc25-4477-883f-140d470b8f1e"). InnerVolumeSpecName "kube-api-access-7mwkm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:39:58 crc kubenswrapper[4857]: I1128 13:39:58.194150 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7mwkm\" (UniqueName: \"kubernetes.io/projected/42581b3f-cc25-4477-883f-140d470b8f1e-kube-api-access-7mwkm\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:58 crc kubenswrapper[4857]: I1128 13:39:58.194180 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/42581b3f-cc25-4477-883f-140d470b8f1e-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:39:58 crc kubenswrapper[4857]: I1128 13:39:58.208277 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-t99ql-config-2w9j7" event={"ID":"42581b3f-cc25-4477-883f-140d470b8f1e","Type":"ContainerDied","Data":"0ecaef163000422512fae4387767353d91e63d00f682a70c5593e19a9fcb4d6e"} Nov 28 13:39:58 crc kubenswrapper[4857]: I1128 13:39:58.208307 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-t99ql-config-2w9j7" Nov 28 13:39:58 crc kubenswrapper[4857]: I1128 13:39:58.208319 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0ecaef163000422512fae4387767353d91e63d00f682a70c5593e19a9fcb4d6e" Nov 28 13:39:58 crc kubenswrapper[4857]: E1128 13:39:58.209209 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-dwjz2" podUID="574e8323-bfa6-4c1d-9a87-53f09671c900" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.051336 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-t99ql-config-2w9j7"] Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.059616 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-t99ql-config-2w9j7"] Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.174725 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-t99ql-config-btx5d"] Nov 28 13:39:59 crc kubenswrapper[4857]: E1128 13:39:59.175161 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42581b3f-cc25-4477-883f-140d470b8f1e" containerName="ovn-config" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.175186 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="42581b3f-cc25-4477-883f-140d470b8f1e" containerName="ovn-config" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.175434 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="42581b3f-cc25-4477-883f-140d470b8f1e" containerName="ovn-config" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.176119 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-t99ql-config-btx5d" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.188270 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.204412 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-t99ql-config-btx5d"] Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.321974 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/16e90833-677c-446f-afaa-58f6a157101b-var-run\") pod \"ovn-controller-t99ql-config-btx5d\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " pod="openstack/ovn-controller-t99ql-config-btx5d" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.322011 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/16e90833-677c-446f-afaa-58f6a157101b-var-log-ovn\") pod \"ovn-controller-t99ql-config-btx5d\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " pod="openstack/ovn-controller-t99ql-config-btx5d" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.322052 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/16e90833-677c-446f-afaa-58f6a157101b-scripts\") pod \"ovn-controller-t99ql-config-btx5d\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " pod="openstack/ovn-controller-t99ql-config-btx5d" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.322075 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52b5d\" (UniqueName: \"kubernetes.io/projected/16e90833-677c-446f-afaa-58f6a157101b-kube-api-access-52b5d\") pod \"ovn-controller-t99ql-config-btx5d\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " pod="openstack/ovn-controller-t99ql-config-btx5d" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.322099 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/16e90833-677c-446f-afaa-58f6a157101b-additional-scripts\") pod \"ovn-controller-t99ql-config-btx5d\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " pod="openstack/ovn-controller-t99ql-config-btx5d" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.322195 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/16e90833-677c-446f-afaa-58f6a157101b-var-run-ovn\") pod \"ovn-controller-t99ql-config-btx5d\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " pod="openstack/ovn-controller-t99ql-config-btx5d" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.329245 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerStarted","Data":"ce54c7d58ad42d61d735cc7c28384296c4ccdf392def1ceb2994e2fc57811e5e"} Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.424196 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/16e90833-677c-446f-afaa-58f6a157101b-scripts\") pod \"ovn-controller-t99ql-config-btx5d\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " 
pod="openstack/ovn-controller-t99ql-config-btx5d" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.424253 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52b5d\" (UniqueName: \"kubernetes.io/projected/16e90833-677c-446f-afaa-58f6a157101b-kube-api-access-52b5d\") pod \"ovn-controller-t99ql-config-btx5d\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " pod="openstack/ovn-controller-t99ql-config-btx5d" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.424286 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/16e90833-677c-446f-afaa-58f6a157101b-additional-scripts\") pod \"ovn-controller-t99ql-config-btx5d\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " pod="openstack/ovn-controller-t99ql-config-btx5d" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.424512 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/16e90833-677c-446f-afaa-58f6a157101b-var-run-ovn\") pod \"ovn-controller-t99ql-config-btx5d\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " pod="openstack/ovn-controller-t99ql-config-btx5d" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.424577 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/16e90833-677c-446f-afaa-58f6a157101b-var-run\") pod \"ovn-controller-t99ql-config-btx5d\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " pod="openstack/ovn-controller-t99ql-config-btx5d" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.424597 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/16e90833-677c-446f-afaa-58f6a157101b-var-log-ovn\") pod \"ovn-controller-t99ql-config-btx5d\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " pod="openstack/ovn-controller-t99ql-config-btx5d" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.424947 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/16e90833-677c-446f-afaa-58f6a157101b-var-log-ovn\") pod \"ovn-controller-t99ql-config-btx5d\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " pod="openstack/ovn-controller-t99ql-config-btx5d" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.424970 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/16e90833-677c-446f-afaa-58f6a157101b-var-run-ovn\") pod \"ovn-controller-t99ql-config-btx5d\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " pod="openstack/ovn-controller-t99ql-config-btx5d" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.425654 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/16e90833-677c-446f-afaa-58f6a157101b-var-run\") pod \"ovn-controller-t99ql-config-btx5d\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " pod="openstack/ovn-controller-t99ql-config-btx5d" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.425775 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/16e90833-677c-446f-afaa-58f6a157101b-additional-scripts\") pod \"ovn-controller-t99ql-config-btx5d\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " 
pod="openstack/ovn-controller-t99ql-config-btx5d" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.426086 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/16e90833-677c-446f-afaa-58f6a157101b-scripts\") pod \"ovn-controller-t99ql-config-btx5d\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " pod="openstack/ovn-controller-t99ql-config-btx5d" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.457607 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52b5d\" (UniqueName: \"kubernetes.io/projected/16e90833-677c-446f-afaa-58f6a157101b-kube-api-access-52b5d\") pod \"ovn-controller-t99ql-config-btx5d\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " pod="openstack/ovn-controller-t99ql-config-btx5d" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.491966 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-t99ql-config-btx5d" Nov 28 13:39:59 crc kubenswrapper[4857]: I1128 13:39:59.956814 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-t99ql-config-btx5d"] Nov 28 13:39:59 crc kubenswrapper[4857]: W1128 13:39:59.973528 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod16e90833_677c_446f_afaa_58f6a157101b.slice/crio-39afce31df283e11689a0f4b07f50e3b59e9a68812c6a2dbeeee606d56090cf1 WatchSource:0}: Error finding container 39afce31df283e11689a0f4b07f50e3b59e9a68812c6a2dbeeee606d56090cf1: Status 404 returned error can't find the container with id 39afce31df283e11689a0f4b07f50e3b59e9a68812c6a2dbeeee606d56090cf1 Nov 28 13:40:00 crc kubenswrapper[4857]: I1128 13:40:00.321303 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42581b3f-cc25-4477-883f-140d470b8f1e" path="/var/lib/kubelet/pods/42581b3f-cc25-4477-883f-140d470b8f1e/volumes" Nov 28 13:40:00 crc kubenswrapper[4857]: I1128 13:40:00.351684 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerStarted","Data":"ca6e4b07ddb5ce36fc245cda1d9e032e507ab8f51871ae61fc6d91bf3d94fbcc"} Nov 28 13:40:00 crc kubenswrapper[4857]: I1128 13:40:00.351734 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerStarted","Data":"4d8e6435aaf1596d23240a79db43f33846ba61b7ae4b65eb49e14339421d4856"} Nov 28 13:40:00 crc kubenswrapper[4857]: I1128 13:40:00.351760 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerStarted","Data":"ee780d1664f73c1b0efc34f94bcba32ef69c9316883ef0a536cf48cc92544c85"} Nov 28 13:40:00 crc kubenswrapper[4857]: I1128 13:40:00.351771 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerStarted","Data":"c3ca6f2469bf537185f1bdcbca3c0daa0bea4b5850c553e3aa9fc5b77b64d67a"} Nov 28 13:40:00 crc kubenswrapper[4857]: I1128 13:40:00.351781 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerStarted","Data":"1e96365ccd71754557edeab3d45001b8fe49eb13fc19f14f1ba33c6eb2378fc2"} Nov 28 13:40:01 crc kubenswrapper[4857]: I1128 13:40:00.366651 
4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-t99ql-config-btx5d" event={"ID":"16e90833-677c-446f-afaa-58f6a157101b","Type":"ContainerStarted","Data":"39afce31df283e11689a0f4b07f50e3b59e9a68812c6a2dbeeee606d56090cf1"} Nov 28 13:40:01 crc kubenswrapper[4857]: E1128 13:40:00.833728 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"container-server\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"container-replicator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\", failed to \"StartContainer\" for \"container-auditor\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\", failed to \"StartContainer\" for \"container-updater\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\"]" pod="openstack/swift-storage-0" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" Nov 28 13:40:01 crc kubenswrapper[4857]: I1128 13:40:01.380024 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerStarted","Data":"7b7ec0a8594688d16e29d5dbc41ca846eb39e0c6507989d1e1ae16d15e9b54b4"} Nov 28 13:40:01 crc kubenswrapper[4857]: I1128 13:40:01.381883 4857 generic.go:334] "Generic (PLEG): container finished" podID="16e90833-677c-446f-afaa-58f6a157101b" containerID="cf1e9309e85c6ed36109909dfa1bceafc508d39182fe6f235d9c9e9aae2f7c61" exitCode=0 Nov 28 13:40:01 crc kubenswrapper[4857]: I1128 13:40:01.381915 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-t99ql-config-btx5d" event={"ID":"16e90833-677c-446f-afaa-58f6a157101b","Type":"ContainerDied","Data":"cf1e9309e85c6ed36109909dfa1bceafc508d39182fe6f235d9c9e9aae2f7c61"} Nov 28 13:40:01 crc kubenswrapper[4857]: E1128 13:40:01.384076 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"container-server\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\", failed to \"StartContainer\" for \"container-replicator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\", failed to \"StartContainer\" for \"container-auditor\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\", failed to \"StartContainer\" for \"container-updater\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\"]" pod="openstack/swift-storage-0" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" Nov 28 13:40:01 crc kubenswrapper[4857]: I1128 13:40:01.873006 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.163210 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-wd9mq"] Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.164598 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-wd9mq" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.185720 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-wd9mq"] Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.286691 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7118342-937d-4707-b384-31729648d90d-operator-scripts\") pod \"cinder-db-create-wd9mq\" (UID: \"b7118342-937d-4707-b384-31729648d90d\") " pod="openstack/cinder-db-create-wd9mq" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.286823 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvpgm\" (UniqueName: \"kubernetes.io/projected/b7118342-937d-4707-b384-31729648d90d-kube-api-access-cvpgm\") pod \"cinder-db-create-wd9mq\" (UID: \"b7118342-937d-4707-b384-31729648d90d\") " pod="openstack/cinder-db-create-wd9mq" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.299056 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-7sswn"] Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.300026 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-7sswn" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.318471 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-7sswn"] Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.378917 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-7dd8-account-create-update-nlgn4"] Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.379888 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-7dd8-account-create-update-nlgn4" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.382045 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.388015 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86982bf5-4b00-4172-b9bc-ac852da2c721-operator-scripts\") pod \"barbican-db-create-7sswn\" (UID: \"86982bf5-4b00-4172-b9bc-ac852da2c721\") " pod="openstack/barbican-db-create-7sswn" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.388134 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7118342-937d-4707-b384-31729648d90d-operator-scripts\") pod \"cinder-db-create-wd9mq\" (UID: \"b7118342-937d-4707-b384-31729648d90d\") " pod="openstack/cinder-db-create-wd9mq" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.388266 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvpgm\" (UniqueName: \"kubernetes.io/projected/b7118342-937d-4707-b384-31729648d90d-kube-api-access-cvpgm\") pod \"cinder-db-create-wd9mq\" (UID: \"b7118342-937d-4707-b384-31729648d90d\") " pod="openstack/cinder-db-create-wd9mq" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.388321 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkwxz\" (UniqueName: \"kubernetes.io/projected/86982bf5-4b00-4172-b9bc-ac852da2c721-kube-api-access-mkwxz\") pod \"barbican-db-create-7sswn\" (UID: \"86982bf5-4b00-4172-b9bc-ac852da2c721\") " pod="openstack/barbican-db-create-7sswn" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.388879 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7118342-937d-4707-b384-31729648d90d-operator-scripts\") pod \"cinder-db-create-wd9mq\" (UID: \"b7118342-937d-4707-b384-31729648d90d\") " pod="openstack/cinder-db-create-wd9mq" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.394938 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-7dd8-account-create-update-nlgn4"] Nov 28 13:40:02 crc kubenswrapper[4857]: E1128 13:40:02.398641 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"container-server\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\", failed to \"StartContainer\" for \"container-replicator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\", failed to \"StartContainer\" for \"container-auditor\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\", failed to \"StartContainer\" for \"container-updater\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\"]" pod="openstack/swift-storage-0" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.419316 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvpgm\" (UniqueName: 
\"kubernetes.io/projected/b7118342-937d-4707-b384-31729648d90d-kube-api-access-cvpgm\") pod \"cinder-db-create-wd9mq\" (UID: \"b7118342-937d-4707-b384-31729648d90d\") " pod="openstack/cinder-db-create-wd9mq" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.499520 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-wd9mq" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.499717 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3ffaab3-cd89-4c3c-87fb-6862af41d2cb-operator-scripts\") pod \"barbican-7dd8-account-create-update-nlgn4\" (UID: \"d3ffaab3-cd89-4c3c-87fb-6862af41d2cb\") " pod="openstack/barbican-7dd8-account-create-update-nlgn4" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.501018 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkwxz\" (UniqueName: \"kubernetes.io/projected/86982bf5-4b00-4172-b9bc-ac852da2c721-kube-api-access-mkwxz\") pod \"barbican-db-create-7sswn\" (UID: \"86982bf5-4b00-4172-b9bc-ac852da2c721\") " pod="openstack/barbican-db-create-7sswn" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.501077 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jm88f\" (UniqueName: \"kubernetes.io/projected/d3ffaab3-cd89-4c3c-87fb-6862af41d2cb-kube-api-access-jm88f\") pod \"barbican-7dd8-account-create-update-nlgn4\" (UID: \"d3ffaab3-cd89-4c3c-87fb-6862af41d2cb\") " pod="openstack/barbican-7dd8-account-create-update-nlgn4" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.501121 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86982bf5-4b00-4172-b9bc-ac852da2c721-operator-scripts\") pod \"barbican-db-create-7sswn\" (UID: \"86982bf5-4b00-4172-b9bc-ac852da2c721\") " pod="openstack/barbican-db-create-7sswn" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.502663 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86982bf5-4b00-4172-b9bc-ac852da2c721-operator-scripts\") pod \"barbican-db-create-7sswn\" (UID: \"86982bf5-4b00-4172-b9bc-ac852da2c721\") " pod="openstack/barbican-db-create-7sswn" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.506734 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-d8b3-account-create-update-r4qcn"] Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.507861 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-d8b3-account-create-update-r4qcn" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.514625 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-d8b3-account-create-update-r4qcn"] Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.515940 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.568197 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkwxz\" (UniqueName: \"kubernetes.io/projected/86982bf5-4b00-4172-b9bc-ac852da2c721-kube-api-access-mkwxz\") pod \"barbican-db-create-7sswn\" (UID: \"86982bf5-4b00-4172-b9bc-ac852da2c721\") " pod="openstack/barbican-db-create-7sswn" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.603727 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jm88f\" (UniqueName: \"kubernetes.io/projected/d3ffaab3-cd89-4c3c-87fb-6862af41d2cb-kube-api-access-jm88f\") pod \"barbican-7dd8-account-create-update-nlgn4\" (UID: \"d3ffaab3-cd89-4c3c-87fb-6862af41d2cb\") " pod="openstack/barbican-7dd8-account-create-update-nlgn4" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.603918 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jnw8f\" (UniqueName: \"kubernetes.io/projected/a25e9cdb-52db-418e-a094-2c6e0cc860eb-kube-api-access-jnw8f\") pod \"cinder-d8b3-account-create-update-r4qcn\" (UID: \"a25e9cdb-52db-418e-a094-2c6e0cc860eb\") " pod="openstack/cinder-d8b3-account-create-update-r4qcn" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.604089 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a25e9cdb-52db-418e-a094-2c6e0cc860eb-operator-scripts\") pod \"cinder-d8b3-account-create-update-r4qcn\" (UID: \"a25e9cdb-52db-418e-a094-2c6e0cc860eb\") " pod="openstack/cinder-d8b3-account-create-update-r4qcn" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.604193 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3ffaab3-cd89-4c3c-87fb-6862af41d2cb-operator-scripts\") pod \"barbican-7dd8-account-create-update-nlgn4\" (UID: \"d3ffaab3-cd89-4c3c-87fb-6862af41d2cb\") " pod="openstack/barbican-7dd8-account-create-update-nlgn4" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.604854 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3ffaab3-cd89-4c3c-87fb-6862af41d2cb-operator-scripts\") pod \"barbican-7dd8-account-create-update-nlgn4\" (UID: \"d3ffaab3-cd89-4c3c-87fb-6862af41d2cb\") " pod="openstack/barbican-7dd8-account-create-update-nlgn4" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.612255 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-7sswn" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.631240 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jm88f\" (UniqueName: \"kubernetes.io/projected/d3ffaab3-cd89-4c3c-87fb-6862af41d2cb-kube-api-access-jm88f\") pod \"barbican-7dd8-account-create-update-nlgn4\" (UID: \"d3ffaab3-cd89-4c3c-87fb-6862af41d2cb\") " pod="openstack/barbican-7dd8-account-create-update-nlgn4" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.688869 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-x2hhj"] Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.691446 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-x2hhj" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.697582 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-7dd8-account-create-update-nlgn4" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.701612 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-x2hhj"] Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.706531 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a25e9cdb-52db-418e-a094-2c6e0cc860eb-operator-scripts\") pod \"cinder-d8b3-account-create-update-r4qcn\" (UID: \"a25e9cdb-52db-418e-a094-2c6e0cc860eb\") " pod="openstack/cinder-d8b3-account-create-update-r4qcn" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.706643 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jnw8f\" (UniqueName: \"kubernetes.io/projected/a25e9cdb-52db-418e-a094-2c6e0cc860eb-kube-api-access-jnw8f\") pod \"cinder-d8b3-account-create-update-r4qcn\" (UID: \"a25e9cdb-52db-418e-a094-2c6e0cc860eb\") " pod="openstack/cinder-d8b3-account-create-update-r4qcn" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.718337 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a25e9cdb-52db-418e-a094-2c6e0cc860eb-operator-scripts\") pod \"cinder-d8b3-account-create-update-r4qcn\" (UID: \"a25e9cdb-52db-418e-a094-2c6e0cc860eb\") " pod="openstack/cinder-d8b3-account-create-update-r4qcn" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.741711 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-t99ql-config-btx5d" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.747105 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jnw8f\" (UniqueName: \"kubernetes.io/projected/a25e9cdb-52db-418e-a094-2c6e0cc860eb-kube-api-access-jnw8f\") pod \"cinder-d8b3-account-create-update-r4qcn\" (UID: \"a25e9cdb-52db-418e-a094-2c6e0cc860eb\") " pod="openstack/cinder-d8b3-account-create-update-r4qcn" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.762393 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-cwhln"] Nov 28 13:40:02 crc kubenswrapper[4857]: E1128 13:40:02.763145 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16e90833-677c-446f-afaa-58f6a157101b" containerName="ovn-config" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.763161 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="16e90833-677c-446f-afaa-58f6a157101b" containerName="ovn-config" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.763521 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="16e90833-677c-446f-afaa-58f6a157101b" containerName="ovn-config" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.764266 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-cwhln" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.769424 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.769468 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.769746 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-bzgqn" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.779095 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.798624 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-cwhln"] Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.807836 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/16e90833-677c-446f-afaa-58f6a157101b-var-run-ovn\") pod \"16e90833-677c-446f-afaa-58f6a157101b\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.807933 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-52b5d\" (UniqueName: \"kubernetes.io/projected/16e90833-677c-446f-afaa-58f6a157101b-kube-api-access-52b5d\") pod \"16e90833-677c-446f-afaa-58f6a157101b\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.807969 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/16e90833-677c-446f-afaa-58f6a157101b-additional-scripts\") pod \"16e90833-677c-446f-afaa-58f6a157101b\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.807987 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/16e90833-677c-446f-afaa-58f6a157101b-var-run\") pod 
\"16e90833-677c-446f-afaa-58f6a157101b\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.808032 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/16e90833-677c-446f-afaa-58f6a157101b-scripts\") pod \"16e90833-677c-446f-afaa-58f6a157101b\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.808080 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/16e90833-677c-446f-afaa-58f6a157101b-var-log-ovn\") pod \"16e90833-677c-446f-afaa-58f6a157101b\" (UID: \"16e90833-677c-446f-afaa-58f6a157101b\") " Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.808259 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hhzq\" (UniqueName: \"kubernetes.io/projected/a914f102-2a88-4272-933b-2f108273c581-kube-api-access-7hhzq\") pod \"keystone-db-sync-cwhln\" (UID: \"a914f102-2a88-4272-933b-2f108273c581\") " pod="openstack/keystone-db-sync-cwhln" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.808319 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a914f102-2a88-4272-933b-2f108273c581-config-data\") pod \"keystone-db-sync-cwhln\" (UID: \"a914f102-2a88-4272-933b-2f108273c581\") " pod="openstack/keystone-db-sync-cwhln" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.808350 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a914f102-2a88-4272-933b-2f108273c581-combined-ca-bundle\") pod \"keystone-db-sync-cwhln\" (UID: \"a914f102-2a88-4272-933b-2f108273c581\") " pod="openstack/keystone-db-sync-cwhln" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.808438 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e640153-f178-4532-af27-302cf3098ef4-operator-scripts\") pod \"neutron-db-create-x2hhj\" (UID: \"2e640153-f178-4532-af27-302cf3098ef4\") " pod="openstack/neutron-db-create-x2hhj" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.808461 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfgk4\" (UniqueName: \"kubernetes.io/projected/2e640153-f178-4532-af27-302cf3098ef4-kube-api-access-sfgk4\") pod \"neutron-db-create-x2hhj\" (UID: \"2e640153-f178-4532-af27-302cf3098ef4\") " pod="openstack/neutron-db-create-x2hhj" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.808597 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/16e90833-677c-446f-afaa-58f6a157101b-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "16e90833-677c-446f-afaa-58f6a157101b" (UID: "16e90833-677c-446f-afaa-58f6a157101b"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.811912 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16e90833-677c-446f-afaa-58f6a157101b-kube-api-access-52b5d" (OuterVolumeSpecName: "kube-api-access-52b5d") pod "16e90833-677c-446f-afaa-58f6a157101b" (UID: "16e90833-677c-446f-afaa-58f6a157101b"). InnerVolumeSpecName "kube-api-access-52b5d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.812577 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16e90833-677c-446f-afaa-58f6a157101b-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "16e90833-677c-446f-afaa-58f6a157101b" (UID: "16e90833-677c-446f-afaa-58f6a157101b"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.812607 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/16e90833-677c-446f-afaa-58f6a157101b-var-run" (OuterVolumeSpecName: "var-run") pod "16e90833-677c-446f-afaa-58f6a157101b" (UID: "16e90833-677c-446f-afaa-58f6a157101b"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.813430 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16e90833-677c-446f-afaa-58f6a157101b-scripts" (OuterVolumeSpecName: "scripts") pod "16e90833-677c-446f-afaa-58f6a157101b" (UID: "16e90833-677c-446f-afaa-58f6a157101b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.813467 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/16e90833-677c-446f-afaa-58f6a157101b-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "16e90833-677c-446f-afaa-58f6a157101b" (UID: "16e90833-677c-446f-afaa-58f6a157101b"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.822809 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5867-account-create-update-j7btn"] Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.823969 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5867-account-create-update-j7btn" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.827001 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.830068 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5867-account-create-update-j7btn"] Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.904592 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-d8b3-account-create-update-r4qcn" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.910543 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hhzq\" (UniqueName: \"kubernetes.io/projected/a914f102-2a88-4272-933b-2f108273c581-kube-api-access-7hhzq\") pod \"keystone-db-sync-cwhln\" (UID: \"a914f102-2a88-4272-933b-2f108273c581\") " pod="openstack/keystone-db-sync-cwhln" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.910640 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a914f102-2a88-4272-933b-2f108273c581-config-data\") pod \"keystone-db-sync-cwhln\" (UID: \"a914f102-2a88-4272-933b-2f108273c581\") " pod="openstack/keystone-db-sync-cwhln" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.910683 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a914f102-2a88-4272-933b-2f108273c581-combined-ca-bundle\") pod \"keystone-db-sync-cwhln\" (UID: \"a914f102-2a88-4272-933b-2f108273c581\") " pod="openstack/keystone-db-sync-cwhln" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.910772 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633-operator-scripts\") pod \"neutron-5867-account-create-update-j7btn\" (UID: \"9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633\") " pod="openstack/neutron-5867-account-create-update-j7btn" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.910806 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e640153-f178-4532-af27-302cf3098ef4-operator-scripts\") pod \"neutron-db-create-x2hhj\" (UID: \"2e640153-f178-4532-af27-302cf3098ef4\") " pod="openstack/neutron-db-create-x2hhj" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.910826 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfgk4\" (UniqueName: \"kubernetes.io/projected/2e640153-f178-4532-af27-302cf3098ef4-kube-api-access-sfgk4\") pod \"neutron-db-create-x2hhj\" (UID: \"2e640153-f178-4532-af27-302cf3098ef4\") " pod="openstack/neutron-db-create-x2hhj" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.910876 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xxhl\" (UniqueName: \"kubernetes.io/projected/9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633-kube-api-access-5xxhl\") pod \"neutron-5867-account-create-update-j7btn\" (UID: \"9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633\") " pod="openstack/neutron-5867-account-create-update-j7btn" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.910935 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-52b5d\" (UniqueName: \"kubernetes.io/projected/16e90833-677c-446f-afaa-58f6a157101b-kube-api-access-52b5d\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.910950 4857 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/16e90833-677c-446f-afaa-58f6a157101b-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.910973 4857 reconciler_common.go:293] "Volume detached for 
volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/16e90833-677c-446f-afaa-58f6a157101b-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.910983 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/16e90833-677c-446f-afaa-58f6a157101b-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.910994 4857 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/16e90833-677c-446f-afaa-58f6a157101b-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.911004 4857 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/16e90833-677c-446f-afaa-58f6a157101b-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.915993 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e640153-f178-4532-af27-302cf3098ef4-operator-scripts\") pod \"neutron-db-create-x2hhj\" (UID: \"2e640153-f178-4532-af27-302cf3098ef4\") " pod="openstack/neutron-db-create-x2hhj" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.916609 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a914f102-2a88-4272-933b-2f108273c581-config-data\") pod \"keystone-db-sync-cwhln\" (UID: \"a914f102-2a88-4272-933b-2f108273c581\") " pod="openstack/keystone-db-sync-cwhln" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.918642 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a914f102-2a88-4272-933b-2f108273c581-combined-ca-bundle\") pod \"keystone-db-sync-cwhln\" (UID: \"a914f102-2a88-4272-933b-2f108273c581\") " pod="openstack/keystone-db-sync-cwhln" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.938038 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hhzq\" (UniqueName: \"kubernetes.io/projected/a914f102-2a88-4272-933b-2f108273c581-kube-api-access-7hhzq\") pod \"keystone-db-sync-cwhln\" (UID: \"a914f102-2a88-4272-933b-2f108273c581\") " pod="openstack/keystone-db-sync-cwhln" Nov 28 13:40:02 crc kubenswrapper[4857]: I1128 13:40:02.938437 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfgk4\" (UniqueName: \"kubernetes.io/projected/2e640153-f178-4532-af27-302cf3098ef4-kube-api-access-sfgk4\") pod \"neutron-db-create-x2hhj\" (UID: \"2e640153-f178-4532-af27-302cf3098ef4\") " pod="openstack/neutron-db-create-x2hhj" Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.012893 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633-operator-scripts\") pod \"neutron-5867-account-create-update-j7btn\" (UID: \"9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633\") " pod="openstack/neutron-5867-account-create-update-j7btn" Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.012969 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xxhl\" (UniqueName: \"kubernetes.io/projected/9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633-kube-api-access-5xxhl\") pod \"neutron-5867-account-create-update-j7btn\" (UID: 
\"9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633\") " pod="openstack/neutron-5867-account-create-update-j7btn" Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.014019 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633-operator-scripts\") pod \"neutron-5867-account-create-update-j7btn\" (UID: \"9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633\") " pod="openstack/neutron-5867-account-create-update-j7btn" Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.029236 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xxhl\" (UniqueName: \"kubernetes.io/projected/9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633-kube-api-access-5xxhl\") pod \"neutron-5867-account-create-update-j7btn\" (UID: \"9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633\") " pod="openstack/neutron-5867-account-create-update-j7btn" Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.040124 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-x2hhj" Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.098893 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-cwhln" Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.113603 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-wd9mq"] Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.146355 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5867-account-create-update-j7btn" Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.179510 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.179557 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.195810 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-d8b3-account-create-update-r4qcn"] Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.215259 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-7sswn"] Nov 28 13:40:03 crc kubenswrapper[4857]: W1128 13:40:03.228099 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod86982bf5_4b00_4172_b9bc_ac852da2c721.slice/crio-76226a1aece6f5e7aab6d929d8b0daf8e22f28fe5751a25d099f3f54c07a40d1 WatchSource:0}: Error finding container 76226a1aece6f5e7aab6d929d8b0daf8e22f28fe5751a25d099f3f54c07a40d1: Status 404 returned error can't find the container with id 76226a1aece6f5e7aab6d929d8b0daf8e22f28fe5751a25d099f3f54c07a40d1 Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.315647 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-7dd8-account-create-update-nlgn4"] Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.409111 4857 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/cinder-db-create-wd9mq" event={"ID":"b7118342-937d-4707-b384-31729648d90d","Type":"ContainerStarted","Data":"80176df7559b27db79394699a5b40d242544bdb847029d97c8524aa4212d7322"} Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.411370 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-7sswn" event={"ID":"86982bf5-4b00-4172-b9bc-ac852da2c721","Type":"ContainerStarted","Data":"76226a1aece6f5e7aab6d929d8b0daf8e22f28fe5751a25d099f3f54c07a40d1"} Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.416060 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d8b3-account-create-update-r4qcn" event={"ID":"a25e9cdb-52db-418e-a094-2c6e0cc860eb","Type":"ContainerStarted","Data":"ce482de43f373faa8e986dbe57f08ee83a06d5e809d348fac94ee0bd3700c7e7"} Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.417708 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-t99ql-config-btx5d" event={"ID":"16e90833-677c-446f-afaa-58f6a157101b","Type":"ContainerDied","Data":"39afce31df283e11689a0f4b07f50e3b59e9a68812c6a2dbeeee606d56090cf1"} Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.417781 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39afce31df283e11689a0f4b07f50e3b59e9a68812c6a2dbeeee606d56090cf1" Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.417854 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-t99ql-config-btx5d" Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.425832 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-7dd8-account-create-update-nlgn4" event={"ID":"d3ffaab3-cd89-4c3c-87fb-6862af41d2cb","Type":"ContainerStarted","Data":"20715ce34ccaa7affe4d8933a0be458ca47c918edd58fc6634f89dcf55f5e772"} Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.494741 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5867-account-create-update-j7btn"] Nov 28 13:40:03 crc kubenswrapper[4857]: W1128 13:40:03.503256 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9ffd5c4d_7e4f_43b8_8b18_5b2a18b33633.slice/crio-d594a8ff80c7bb7a96c5a461a72e4edb22cdf1784dcd19139127843c3bc92c7f WatchSource:0}: Error finding container d594a8ff80c7bb7a96c5a461a72e4edb22cdf1784dcd19139127843c3bc92c7f: Status 404 returned error can't find the container with id d594a8ff80c7bb7a96c5a461a72e4edb22cdf1784dcd19139127843c3bc92c7f Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.543224 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-x2hhj"] Nov 28 13:40:03 crc kubenswrapper[4857]: W1128 13:40:03.547517 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2e640153_f178_4532_af27_302cf3098ef4.slice/crio-a362b866581294506feed427bdf503fcf8070d01f7e6b11e00304b193290d235 WatchSource:0}: Error finding container a362b866581294506feed427bdf503fcf8070d01f7e6b11e00304b193290d235: Status 404 returned error can't find the container with id a362b866581294506feed427bdf503fcf8070d01f7e6b11e00304b193290d235 Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.651939 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-cwhln"] Nov 28 13:40:03 crc kubenswrapper[4857]: W1128 13:40:03.652932 4857 manager.go:1169] 
Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda914f102_2a88_4272_933b_2f108273c581.slice/crio-b32dbb7b1864f0d78b2f96e7e89d20d7c835178d968e85a7a1ad9faba0a5164b WatchSource:0}: Error finding container b32dbb7b1864f0d78b2f96e7e89d20d7c835178d968e85a7a1ad9faba0a5164b: Status 404 returned error can't find the container with id b32dbb7b1864f0d78b2f96e7e89d20d7c835178d968e85a7a1ad9faba0a5164b Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.846462 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-t99ql-config-btx5d"] Nov 28 13:40:03 crc kubenswrapper[4857]: I1128 13:40:03.858419 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-t99ql-config-btx5d"] Nov 28 13:40:04 crc kubenswrapper[4857]: I1128 13:40:04.320308 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16e90833-677c-446f-afaa-58f6a157101b" path="/var/lib/kubelet/pods/16e90833-677c-446f-afaa-58f6a157101b/volumes" Nov 28 13:40:04 crc kubenswrapper[4857]: I1128 13:40:04.435890 4857 generic.go:334] "Generic (PLEG): container finished" podID="b7118342-937d-4707-b384-31729648d90d" containerID="c5fa9e8bb8c361f1c6685760ea46af512b07d5764950a9492028f7c42b4af089" exitCode=0 Nov 28 13:40:04 crc kubenswrapper[4857]: I1128 13:40:04.435962 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-wd9mq" event={"ID":"b7118342-937d-4707-b384-31729648d90d","Type":"ContainerDied","Data":"c5fa9e8bb8c361f1c6685760ea46af512b07d5764950a9492028f7c42b4af089"} Nov 28 13:40:04 crc kubenswrapper[4857]: I1128 13:40:04.441921 4857 generic.go:334] "Generic (PLEG): container finished" podID="86982bf5-4b00-4172-b9bc-ac852da2c721" containerID="2d6d6c3dd1604b94e9e508e69b19b0de5d2d7a133234920d03dc0f4547ebbfba" exitCode=0 Nov 28 13:40:04 crc kubenswrapper[4857]: I1128 13:40:04.441998 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-7sswn" event={"ID":"86982bf5-4b00-4172-b9bc-ac852da2c721","Type":"ContainerDied","Data":"2d6d6c3dd1604b94e9e508e69b19b0de5d2d7a133234920d03dc0f4547ebbfba"} Nov 28 13:40:04 crc kubenswrapper[4857]: I1128 13:40:04.443429 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-cwhln" event={"ID":"a914f102-2a88-4272-933b-2f108273c581","Type":"ContainerStarted","Data":"b32dbb7b1864f0d78b2f96e7e89d20d7c835178d968e85a7a1ad9faba0a5164b"} Nov 28 13:40:04 crc kubenswrapper[4857]: I1128 13:40:04.445075 4857 generic.go:334] "Generic (PLEG): container finished" podID="2e640153-f178-4532-af27-302cf3098ef4" containerID="2676ac5d14e3c0810d8b7bcc46f55ae7cc4e6f8b9048e707dd961359a7bd5c41" exitCode=0 Nov 28 13:40:04 crc kubenswrapper[4857]: I1128 13:40:04.445137 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-x2hhj" event={"ID":"2e640153-f178-4532-af27-302cf3098ef4","Type":"ContainerDied","Data":"2676ac5d14e3c0810d8b7bcc46f55ae7cc4e6f8b9048e707dd961359a7bd5c41"} Nov 28 13:40:04 crc kubenswrapper[4857]: I1128 13:40:04.445163 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-x2hhj" event={"ID":"2e640153-f178-4532-af27-302cf3098ef4","Type":"ContainerStarted","Data":"a362b866581294506feed427bdf503fcf8070d01f7e6b11e00304b193290d235"} Nov 28 13:40:04 crc kubenswrapper[4857]: I1128 13:40:04.446426 4857 generic.go:334] "Generic (PLEG): container finished" podID="a25e9cdb-52db-418e-a094-2c6e0cc860eb" 
containerID="9bebe354f4e687668c7b355847459f71870ebcf2d1bbe3d8bce669ca51585b5b" exitCode=0 Nov 28 13:40:04 crc kubenswrapper[4857]: I1128 13:40:04.446473 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d8b3-account-create-update-r4qcn" event={"ID":"a25e9cdb-52db-418e-a094-2c6e0cc860eb","Type":"ContainerDied","Data":"9bebe354f4e687668c7b355847459f71870ebcf2d1bbe3d8bce669ca51585b5b"} Nov 28 13:40:04 crc kubenswrapper[4857]: I1128 13:40:04.448243 4857 generic.go:334] "Generic (PLEG): container finished" podID="9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633" containerID="647db67975f4acdafaff02496b5e1bc40cc59e718b88965d4617b2338c512f5f" exitCode=0 Nov 28 13:40:04 crc kubenswrapper[4857]: I1128 13:40:04.448299 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5867-account-create-update-j7btn" event={"ID":"9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633","Type":"ContainerDied","Data":"647db67975f4acdafaff02496b5e1bc40cc59e718b88965d4617b2338c512f5f"} Nov 28 13:40:04 crc kubenswrapper[4857]: I1128 13:40:04.448316 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5867-account-create-update-j7btn" event={"ID":"9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633","Type":"ContainerStarted","Data":"d594a8ff80c7bb7a96c5a461a72e4edb22cdf1784dcd19139127843c3bc92c7f"} Nov 28 13:40:04 crc kubenswrapper[4857]: I1128 13:40:04.451126 4857 generic.go:334] "Generic (PLEG): container finished" podID="d3ffaab3-cd89-4c3c-87fb-6862af41d2cb" containerID="88d66ca4343559d7d4481c694db17d2be3dfdb37e9c02ffc49a3d608062a5a93" exitCode=0 Nov 28 13:40:04 crc kubenswrapper[4857]: I1128 13:40:04.451192 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-7dd8-account-create-update-nlgn4" event={"ID":"d3ffaab3-cd89-4c3c-87fb-6862af41d2cb","Type":"ContainerDied","Data":"88d66ca4343559d7d4481c694db17d2be3dfdb37e9c02ffc49a3d608062a5a93"} Nov 28 13:40:05 crc kubenswrapper[4857]: I1128 13:40:05.823861 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-x2hhj" Nov 28 13:40:05 crc kubenswrapper[4857]: I1128 13:40:05.969569 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e640153-f178-4532-af27-302cf3098ef4-operator-scripts\") pod \"2e640153-f178-4532-af27-302cf3098ef4\" (UID: \"2e640153-f178-4532-af27-302cf3098ef4\") " Nov 28 13:40:05 crc kubenswrapper[4857]: I1128 13:40:05.969671 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sfgk4\" (UniqueName: \"kubernetes.io/projected/2e640153-f178-4532-af27-302cf3098ef4-kube-api-access-sfgk4\") pod \"2e640153-f178-4532-af27-302cf3098ef4\" (UID: \"2e640153-f178-4532-af27-302cf3098ef4\") " Nov 28 13:40:05 crc kubenswrapper[4857]: I1128 13:40:05.970600 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e640153-f178-4532-af27-302cf3098ef4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2e640153-f178-4532-af27-302cf3098ef4" (UID: "2e640153-f178-4532-af27-302cf3098ef4"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:05 crc kubenswrapper[4857]: I1128 13:40:05.979351 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e640153-f178-4532-af27-302cf3098ef4-kube-api-access-sfgk4" (OuterVolumeSpecName: "kube-api-access-sfgk4") pod "2e640153-f178-4532-af27-302cf3098ef4" (UID: "2e640153-f178-4532-af27-302cf3098ef4"). InnerVolumeSpecName "kube-api-access-sfgk4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.030911 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-7sswn" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.038866 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-7dd8-account-create-update-nlgn4" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.056164 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-wd9mq" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.058994 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-d8b3-account-create-update-r4qcn" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.068327 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5867-account-create-update-j7btn" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.072144 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e640153-f178-4532-af27-302cf3098ef4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.072171 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sfgk4\" (UniqueName: \"kubernetes.io/projected/2e640153-f178-4532-af27-302cf3098ef4-kube-api-access-sfgk4\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.173196 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3ffaab3-cd89-4c3c-87fb-6862af41d2cb-operator-scripts\") pod \"d3ffaab3-cd89-4c3c-87fb-6862af41d2cb\" (UID: \"d3ffaab3-cd89-4c3c-87fb-6862af41d2cb\") " Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.173242 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jnw8f\" (UniqueName: \"kubernetes.io/projected/a25e9cdb-52db-418e-a094-2c6e0cc860eb-kube-api-access-jnw8f\") pod \"a25e9cdb-52db-418e-a094-2c6e0cc860eb\" (UID: \"a25e9cdb-52db-418e-a094-2c6e0cc860eb\") " Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.173295 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a25e9cdb-52db-418e-a094-2c6e0cc860eb-operator-scripts\") pod \"a25e9cdb-52db-418e-a094-2c6e0cc860eb\" (UID: \"a25e9cdb-52db-418e-a094-2c6e0cc860eb\") " Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.173327 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jm88f\" (UniqueName: \"kubernetes.io/projected/d3ffaab3-cd89-4c3c-87fb-6862af41d2cb-kube-api-access-jm88f\") pod \"d3ffaab3-cd89-4c3c-87fb-6862af41d2cb\" (UID: \"d3ffaab3-cd89-4c3c-87fb-6862af41d2cb\") " Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 
13:40:06.173363 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cvpgm\" (UniqueName: \"kubernetes.io/projected/b7118342-937d-4707-b384-31729648d90d-kube-api-access-cvpgm\") pod \"b7118342-937d-4707-b384-31729648d90d\" (UID: \"b7118342-937d-4707-b384-31729648d90d\") " Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.173434 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkwxz\" (UniqueName: \"kubernetes.io/projected/86982bf5-4b00-4172-b9bc-ac852da2c721-kube-api-access-mkwxz\") pod \"86982bf5-4b00-4172-b9bc-ac852da2c721\" (UID: \"86982bf5-4b00-4172-b9bc-ac852da2c721\") " Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.173469 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86982bf5-4b00-4172-b9bc-ac852da2c721-operator-scripts\") pod \"86982bf5-4b00-4172-b9bc-ac852da2c721\" (UID: \"86982bf5-4b00-4172-b9bc-ac852da2c721\") " Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.173522 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5xxhl\" (UniqueName: \"kubernetes.io/projected/9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633-kube-api-access-5xxhl\") pod \"9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633\" (UID: \"9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633\") " Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.173589 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633-operator-scripts\") pod \"9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633\" (UID: \"9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633\") " Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.173662 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7118342-937d-4707-b384-31729648d90d-operator-scripts\") pod \"b7118342-937d-4707-b384-31729648d90d\" (UID: \"b7118342-937d-4707-b384-31729648d90d\") " Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.174231 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3ffaab3-cd89-4c3c-87fb-6862af41d2cb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d3ffaab3-cd89-4c3c-87fb-6862af41d2cb" (UID: "d3ffaab3-cd89-4c3c-87fb-6862af41d2cb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.174224 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a25e9cdb-52db-418e-a094-2c6e0cc860eb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a25e9cdb-52db-418e-a094-2c6e0cc860eb" (UID: "a25e9cdb-52db-418e-a094-2c6e0cc860eb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.174547 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b7118342-937d-4707-b384-31729648d90d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b7118342-937d-4707-b384-31729648d90d" (UID: "b7118342-937d-4707-b384-31729648d90d"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.174922 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86982bf5-4b00-4172-b9bc-ac852da2c721-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "86982bf5-4b00-4172-b9bc-ac852da2c721" (UID: "86982bf5-4b00-4172-b9bc-ac852da2c721"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.174920 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633" (UID: "9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.177889 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7118342-937d-4707-b384-31729648d90d-kube-api-access-cvpgm" (OuterVolumeSpecName: "kube-api-access-cvpgm") pod "b7118342-937d-4707-b384-31729648d90d" (UID: "b7118342-937d-4707-b384-31729648d90d"). InnerVolumeSpecName "kube-api-access-cvpgm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.181992 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3ffaab3-cd89-4c3c-87fb-6862af41d2cb-kube-api-access-jm88f" (OuterVolumeSpecName: "kube-api-access-jm88f") pod "d3ffaab3-cd89-4c3c-87fb-6862af41d2cb" (UID: "d3ffaab3-cd89-4c3c-87fb-6862af41d2cb"). InnerVolumeSpecName "kube-api-access-jm88f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.182051 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86982bf5-4b00-4172-b9bc-ac852da2c721-kube-api-access-mkwxz" (OuterVolumeSpecName: "kube-api-access-mkwxz") pod "86982bf5-4b00-4172-b9bc-ac852da2c721" (UID: "86982bf5-4b00-4172-b9bc-ac852da2c721"). InnerVolumeSpecName "kube-api-access-mkwxz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.189401 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633-kube-api-access-5xxhl" (OuterVolumeSpecName: "kube-api-access-5xxhl") pod "9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633" (UID: "9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633"). InnerVolumeSpecName "kube-api-access-5xxhl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.194762 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a25e9cdb-52db-418e-a094-2c6e0cc860eb-kube-api-access-jnw8f" (OuterVolumeSpecName: "kube-api-access-jnw8f") pod "a25e9cdb-52db-418e-a094-2c6e0cc860eb" (UID: "a25e9cdb-52db-418e-a094-2c6e0cc860eb"). InnerVolumeSpecName "kube-api-access-jnw8f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.275714 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3ffaab3-cd89-4c3c-87fb-6862af41d2cb-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.275822 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jnw8f\" (UniqueName: \"kubernetes.io/projected/a25e9cdb-52db-418e-a094-2c6e0cc860eb-kube-api-access-jnw8f\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.275905 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a25e9cdb-52db-418e-a094-2c6e0cc860eb-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.275918 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jm88f\" (UniqueName: \"kubernetes.io/projected/d3ffaab3-cd89-4c3c-87fb-6862af41d2cb-kube-api-access-jm88f\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.275930 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cvpgm\" (UniqueName: \"kubernetes.io/projected/b7118342-937d-4707-b384-31729648d90d-kube-api-access-cvpgm\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.275941 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkwxz\" (UniqueName: \"kubernetes.io/projected/86982bf5-4b00-4172-b9bc-ac852da2c721-kube-api-access-mkwxz\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.275952 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86982bf5-4b00-4172-b9bc-ac852da2c721-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.280612 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5xxhl\" (UniqueName: \"kubernetes.io/projected/9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633-kube-api-access-5xxhl\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.280646 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.280678 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7118342-937d-4707-b384-31729648d90d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.472049 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-7dd8-account-create-update-nlgn4" event={"ID":"d3ffaab3-cd89-4c3c-87fb-6862af41d2cb","Type":"ContainerDied","Data":"20715ce34ccaa7affe4d8933a0be458ca47c918edd58fc6634f89dcf55f5e772"} Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.472092 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="20715ce34ccaa7affe4d8933a0be458ca47c918edd58fc6634f89dcf55f5e772" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.472197 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-7dd8-account-create-update-nlgn4" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.476725 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-wd9mq" event={"ID":"b7118342-937d-4707-b384-31729648d90d","Type":"ContainerDied","Data":"80176df7559b27db79394699a5b40d242544bdb847029d97c8524aa4212d7322"} Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.476758 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="80176df7559b27db79394699a5b40d242544bdb847029d97c8524aa4212d7322" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.476827 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-wd9mq" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.483163 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-7sswn" event={"ID":"86982bf5-4b00-4172-b9bc-ac852da2c721","Type":"ContainerDied","Data":"76226a1aece6f5e7aab6d929d8b0daf8e22f28fe5751a25d099f3f54c07a40d1"} Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.483206 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="76226a1aece6f5e7aab6d929d8b0daf8e22f28fe5751a25d099f3f54c07a40d1" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.483213 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-7sswn" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.485024 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-x2hhj" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.485009 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-x2hhj" event={"ID":"2e640153-f178-4532-af27-302cf3098ef4","Type":"ContainerDied","Data":"a362b866581294506feed427bdf503fcf8070d01f7e6b11e00304b193290d235"} Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.485235 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a362b866581294506feed427bdf503fcf8070d01f7e6b11e00304b193290d235" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.486616 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-d8b3-account-create-update-r4qcn" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.486623 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d8b3-account-create-update-r4qcn" event={"ID":"a25e9cdb-52db-418e-a094-2c6e0cc860eb","Type":"ContainerDied","Data":"ce482de43f373faa8e986dbe57f08ee83a06d5e809d348fac94ee0bd3700c7e7"} Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.486647 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce482de43f373faa8e986dbe57f08ee83a06d5e809d348fac94ee0bd3700c7e7" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.489410 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5867-account-create-update-j7btn" event={"ID":"9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633","Type":"ContainerDied","Data":"d594a8ff80c7bb7a96c5a461a72e4edb22cdf1784dcd19139127843c3bc92c7f"} Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.489437 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d594a8ff80c7bb7a96c5a461a72e4edb22cdf1784dcd19139127843c3bc92c7f" Nov 28 13:40:06 crc kubenswrapper[4857]: I1128 13:40:06.489463 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5867-account-create-update-j7btn" Nov 28 13:40:09 crc kubenswrapper[4857]: I1128 13:40:09.530972 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-cwhln" event={"ID":"a914f102-2a88-4272-933b-2f108273c581","Type":"ContainerStarted","Data":"4b0c9ed1f44474664d82aa8cab066df058686abc50bf319b8d3ab71fdc39a7d8"} Nov 28 13:40:09 crc kubenswrapper[4857]: I1128 13:40:09.554191 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-cwhln" podStartSLOduration=2.679926649 podStartE2EDuration="7.554162839s" podCreationTimestamp="2025-11-28 13:40:02 +0000 UTC" firstStartedPulling="2025-11-28 13:40:03.656915021 +0000 UTC m=+1295.684290188" lastFinishedPulling="2025-11-28 13:40:08.531151191 +0000 UTC m=+1300.558526378" observedRunningTime="2025-11-28 13:40:09.550429961 +0000 UTC m=+1301.577805138" watchObservedRunningTime="2025-11-28 13:40:09.554162839 +0000 UTC m=+1301.581538036" Nov 28 13:40:11 crc kubenswrapper[4857]: I1128 13:40:11.556978 4857 generic.go:334] "Generic (PLEG): container finished" podID="a914f102-2a88-4272-933b-2f108273c581" containerID="4b0c9ed1f44474664d82aa8cab066df058686abc50bf319b8d3ab71fdc39a7d8" exitCode=0 Nov 28 13:40:11 crc kubenswrapper[4857]: I1128 13:40:11.557070 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-cwhln" event={"ID":"a914f102-2a88-4272-933b-2f108273c581","Type":"ContainerDied","Data":"4b0c9ed1f44474664d82aa8cab066df058686abc50bf319b8d3ab71fdc39a7d8"} Nov 28 13:40:12 crc kubenswrapper[4857]: I1128 13:40:12.946152 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-cwhln" Nov 28 13:40:12 crc kubenswrapper[4857]: I1128 13:40:12.991292 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7hhzq\" (UniqueName: \"kubernetes.io/projected/a914f102-2a88-4272-933b-2f108273c581-kube-api-access-7hhzq\") pod \"a914f102-2a88-4272-933b-2f108273c581\" (UID: \"a914f102-2a88-4272-933b-2f108273c581\") " Nov 28 13:40:12 crc kubenswrapper[4857]: I1128 13:40:12.991446 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a914f102-2a88-4272-933b-2f108273c581-config-data\") pod \"a914f102-2a88-4272-933b-2f108273c581\" (UID: \"a914f102-2a88-4272-933b-2f108273c581\") " Nov 28 13:40:12 crc kubenswrapper[4857]: I1128 13:40:12.991546 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a914f102-2a88-4272-933b-2f108273c581-combined-ca-bundle\") pod \"a914f102-2a88-4272-933b-2f108273c581\" (UID: \"a914f102-2a88-4272-933b-2f108273c581\") " Nov 28 13:40:12 crc kubenswrapper[4857]: I1128 13:40:12.996896 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a914f102-2a88-4272-933b-2f108273c581-kube-api-access-7hhzq" (OuterVolumeSpecName: "kube-api-access-7hhzq") pod "a914f102-2a88-4272-933b-2f108273c581" (UID: "a914f102-2a88-4272-933b-2f108273c581"). InnerVolumeSpecName "kube-api-access-7hhzq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.020931 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a914f102-2a88-4272-933b-2f108273c581-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a914f102-2a88-4272-933b-2f108273c581" (UID: "a914f102-2a88-4272-933b-2f108273c581"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.031285 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a914f102-2a88-4272-933b-2f108273c581-config-data" (OuterVolumeSpecName: "config-data") pod "a914f102-2a88-4272-933b-2f108273c581" (UID: "a914f102-2a88-4272-933b-2f108273c581"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.093909 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a914f102-2a88-4272-933b-2f108273c581-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.093945 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a914f102-2a88-4272-933b-2f108273c581-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.093960 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7hhzq\" (UniqueName: \"kubernetes.io/projected/a914f102-2a88-4272-933b-2f108273c581-kube-api-access-7hhzq\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.578927 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-cwhln" event={"ID":"a914f102-2a88-4272-933b-2f108273c581","Type":"ContainerDied","Data":"b32dbb7b1864f0d78b2f96e7e89d20d7c835178d968e85a7a1ad9faba0a5164b"} Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.578974 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b32dbb7b1864f0d78b2f96e7e89d20d7c835178d968e85a7a1ad9faba0a5164b" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.579043 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-cwhln" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.915003 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-wqm6d"] Nov 28 13:40:13 crc kubenswrapper[4857]: E1128 13:40:13.915519 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a25e9cdb-52db-418e-a094-2c6e0cc860eb" containerName="mariadb-account-create-update" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.915533 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a25e9cdb-52db-418e-a094-2c6e0cc860eb" containerName="mariadb-account-create-update" Nov 28 13:40:13 crc kubenswrapper[4857]: E1128 13:40:13.915545 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3ffaab3-cd89-4c3c-87fb-6862af41d2cb" containerName="mariadb-account-create-update" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.915553 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3ffaab3-cd89-4c3c-87fb-6862af41d2cb" containerName="mariadb-account-create-update" Nov 28 13:40:13 crc kubenswrapper[4857]: E1128 13:40:13.915572 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7118342-937d-4707-b384-31729648d90d" containerName="mariadb-database-create" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.915578 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7118342-937d-4707-b384-31729648d90d" containerName="mariadb-database-create" Nov 28 13:40:13 crc kubenswrapper[4857]: E1128 13:40:13.915594 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e640153-f178-4532-af27-302cf3098ef4" containerName="mariadb-database-create" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.915600 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e640153-f178-4532-af27-302cf3098ef4" containerName="mariadb-database-create" Nov 28 13:40:13 crc kubenswrapper[4857]: E1128 13:40:13.915614 4857 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="86982bf5-4b00-4172-b9bc-ac852da2c721" containerName="mariadb-database-create" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.915622 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="86982bf5-4b00-4172-b9bc-ac852da2c721" containerName="mariadb-database-create" Nov 28 13:40:13 crc kubenswrapper[4857]: E1128 13:40:13.915632 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a914f102-2a88-4272-933b-2f108273c581" containerName="keystone-db-sync" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.915637 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a914f102-2a88-4272-933b-2f108273c581" containerName="keystone-db-sync" Nov 28 13:40:13 crc kubenswrapper[4857]: E1128 13:40:13.915645 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633" containerName="mariadb-account-create-update" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.915651 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633" containerName="mariadb-account-create-update" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.915799 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a914f102-2a88-4272-933b-2f108273c581" containerName="keystone-db-sync" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.915816 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="86982bf5-4b00-4172-b9bc-ac852da2c721" containerName="mariadb-database-create" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.915837 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3ffaab3-cd89-4c3c-87fb-6862af41d2cb" containerName="mariadb-account-create-update" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.915850 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7118342-937d-4707-b384-31729648d90d" containerName="mariadb-database-create" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.915861 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633" containerName="mariadb-account-create-update" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.915871 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e640153-f178-4532-af27-302cf3098ef4" containerName="mariadb-database-create" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.915882 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a25e9cdb-52db-418e-a094-2c6e0cc860eb" containerName="mariadb-account-create-update" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.916409 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-wqm6d" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.919742 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.919943 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.920064 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.920186 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.920317 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-bzgqn" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.934912 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c9d85d47c-v4nhd"] Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.936444 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9d85d47c-v4nhd" Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.967517 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-wqm6d"] Nov 28 13:40:13 crc kubenswrapper[4857]: I1128 13:40:13.981656 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9d85d47c-v4nhd"] Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.011029 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4cwr5\" (UniqueName: \"kubernetes.io/projected/51aad21c-4090-40b1-8433-eb79d6d01a68-kube-api-access-4cwr5\") pod \"dnsmasq-dns-5c9d85d47c-v4nhd\" (UID: \"51aad21c-4090-40b1-8433-eb79d6d01a68\") " pod="openstack/dnsmasq-dns-5c9d85d47c-v4nhd" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.011082 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-config-data\") pod \"keystone-bootstrap-wqm6d\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " pod="openstack/keystone-bootstrap-wqm6d" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.011118 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-config\") pod \"dnsmasq-dns-5c9d85d47c-v4nhd\" (UID: \"51aad21c-4090-40b1-8433-eb79d6d01a68\") " pod="openstack/dnsmasq-dns-5c9d85d47c-v4nhd" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.011141 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-credential-keys\") pod \"keystone-bootstrap-wqm6d\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " pod="openstack/keystone-bootstrap-wqm6d" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.011193 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-combined-ca-bundle\") pod \"keystone-bootstrap-wqm6d\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " 
pod="openstack/keystone-bootstrap-wqm6d" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.011250 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9d85d47c-v4nhd\" (UID: \"51aad21c-4090-40b1-8433-eb79d6d01a68\") " pod="openstack/dnsmasq-dns-5c9d85d47c-v4nhd" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.011275 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jggh4\" (UniqueName: \"kubernetes.io/projected/db417385-2d5f-42e0-97de-822e20e9e6ed-kube-api-access-jggh4\") pod \"keystone-bootstrap-wqm6d\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " pod="openstack/keystone-bootstrap-wqm6d" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.011301 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9d85d47c-v4nhd\" (UID: \"51aad21c-4090-40b1-8433-eb79d6d01a68\") " pod="openstack/dnsmasq-dns-5c9d85d47c-v4nhd" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.011321 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-dns-svc\") pod \"dnsmasq-dns-5c9d85d47c-v4nhd\" (UID: \"51aad21c-4090-40b1-8433-eb79d6d01a68\") " pod="openstack/dnsmasq-dns-5c9d85d47c-v4nhd" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.011355 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-fernet-keys\") pod \"keystone-bootstrap-wqm6d\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " pod="openstack/keystone-bootstrap-wqm6d" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.011417 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-scripts\") pod \"keystone-bootstrap-wqm6d\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " pod="openstack/keystone-bootstrap-wqm6d" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.066988 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-g9jf5"] Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.072544 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-g9jf5" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.074197 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-c29m2" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.076334 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.077526 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.081726 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-g9jf5"] Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.160332 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-combined-ca-bundle\") pod \"cinder-db-sync-g9jf5\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " pod="openstack/cinder-db-sync-g9jf5" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.160457 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9d85d47c-v4nhd\" (UID: \"51aad21c-4090-40b1-8433-eb79d6d01a68\") " pod="openstack/dnsmasq-dns-5c9d85d47c-v4nhd" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.160491 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jggh4\" (UniqueName: \"kubernetes.io/projected/db417385-2d5f-42e0-97de-822e20e9e6ed-kube-api-access-jggh4\") pod \"keystone-bootstrap-wqm6d\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " pod="openstack/keystone-bootstrap-wqm6d" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.160522 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9d85d47c-v4nhd\" (UID: \"51aad21c-4090-40b1-8433-eb79d6d01a68\") " pod="openstack/dnsmasq-dns-5c9d85d47c-v4nhd" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.160549 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-dns-svc\") pod \"dnsmasq-dns-5c9d85d47c-v4nhd\" (UID: \"51aad21c-4090-40b1-8433-eb79d6d01a68\") " pod="openstack/dnsmasq-dns-5c9d85d47c-v4nhd" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.160594 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h72dj\" (UniqueName: \"kubernetes.io/projected/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-kube-api-access-h72dj\") pod \"cinder-db-sync-g9jf5\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " pod="openstack/cinder-db-sync-g9jf5" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.160614 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-scripts\") pod \"cinder-db-sync-g9jf5\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " pod="openstack/cinder-db-sync-g9jf5" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.160641 4857 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-fernet-keys\") pod \"keystone-bootstrap-wqm6d\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " pod="openstack/keystone-bootstrap-wqm6d" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.160720 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-config-data\") pod \"cinder-db-sync-g9jf5\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " pod="openstack/cinder-db-sync-g9jf5" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.160773 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-scripts\") pod \"keystone-bootstrap-wqm6d\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " pod="openstack/keystone-bootstrap-wqm6d" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.160821 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-db-sync-config-data\") pod \"cinder-db-sync-g9jf5\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " pod="openstack/cinder-db-sync-g9jf5" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.160857 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4cwr5\" (UniqueName: \"kubernetes.io/projected/51aad21c-4090-40b1-8433-eb79d6d01a68-kube-api-access-4cwr5\") pod \"dnsmasq-dns-5c9d85d47c-v4nhd\" (UID: \"51aad21c-4090-40b1-8433-eb79d6d01a68\") " pod="openstack/dnsmasq-dns-5c9d85d47c-v4nhd" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.160880 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-config-data\") pod \"keystone-bootstrap-wqm6d\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " pod="openstack/keystone-bootstrap-wqm6d" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.160911 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-config\") pod \"dnsmasq-dns-5c9d85d47c-v4nhd\" (UID: \"51aad21c-4090-40b1-8433-eb79d6d01a68\") " pod="openstack/dnsmasq-dns-5c9d85d47c-v4nhd" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.160937 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-credential-keys\") pod \"keystone-bootstrap-wqm6d\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " pod="openstack/keystone-bootstrap-wqm6d" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.161000 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-combined-ca-bundle\") pod \"keystone-bootstrap-wqm6d\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " pod="openstack/keystone-bootstrap-wqm6d" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.161024 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-etc-machine-id\") pod \"cinder-db-sync-g9jf5\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " pod="openstack/cinder-db-sync-g9jf5" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.162008 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9d85d47c-v4nhd\" (UID: \"51aad21c-4090-40b1-8433-eb79d6d01a68\") " pod="openstack/dnsmasq-dns-5c9d85d47c-v4nhd" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.165036 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-dns-svc\") pod \"dnsmasq-dns-5c9d85d47c-v4nhd\" (UID: \"51aad21c-4090-40b1-8433-eb79d6d01a68\") " pod="openstack/dnsmasq-dns-5c9d85d47c-v4nhd" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.168017 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-config\") pod \"dnsmasq-dns-5c9d85d47c-v4nhd\" (UID: \"51aad21c-4090-40b1-8433-eb79d6d01a68\") " pod="openstack/dnsmasq-dns-5c9d85d47c-v4nhd" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.171387 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-scripts\") pod \"keystone-bootstrap-wqm6d\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " pod="openstack/keystone-bootstrap-wqm6d" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.171586 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-combined-ca-bundle\") pod \"keystone-bootstrap-wqm6d\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " pod="openstack/keystone-bootstrap-wqm6d" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.171636 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-config-data\") pod \"keystone-bootstrap-wqm6d\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " pod="openstack/keystone-bootstrap-wqm6d" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.171956 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9d85d47c-v4nhd\" (UID: \"51aad21c-4090-40b1-8433-eb79d6d01a68\") " pod="openstack/dnsmasq-dns-5c9d85d47c-v4nhd" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.172130 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-fernet-keys\") pod \"keystone-bootstrap-wqm6d\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " pod="openstack/keystone-bootstrap-wqm6d" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.175192 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-credential-keys\") pod \"keystone-bootstrap-wqm6d\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " pod="openstack/keystone-bootstrap-wqm6d" Nov 28 13:40:14 crc 
kubenswrapper[4857]: I1128 13:40:14.186204 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.189057 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.198512 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.207373 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4cwr5\" (UniqueName: \"kubernetes.io/projected/51aad21c-4090-40b1-8433-eb79d6d01a68-kube-api-access-4cwr5\") pod \"dnsmasq-dns-5c9d85d47c-v4nhd\" (UID: \"51aad21c-4090-40b1-8433-eb79d6d01a68\") " pod="openstack/dnsmasq-dns-5c9d85d47c-v4nhd" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.214574 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.220410 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jggh4\" (UniqueName: \"kubernetes.io/projected/db417385-2d5f-42e0-97de-822e20e9e6ed-kube-api-access-jggh4\") pod \"keystone-bootstrap-wqm6d\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " pod="openstack/keystone-bootstrap-wqm6d" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.220579 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-sp8xb"] Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.222920 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-sp8xb" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.240483 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.240699 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-rd8bj" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.241183 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.251980 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-46vxl"] Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.253031 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-46vxl" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.255910 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.256103 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.263971 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-96hqp" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.264883 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-db-sync-config-data\") pod \"cinder-db-sync-g9jf5\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " pod="openstack/cinder-db-sync-g9jf5" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.264946 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-etc-machine-id\") pod \"cinder-db-sync-g9jf5\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " pod="openstack/cinder-db-sync-g9jf5" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.264973 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-combined-ca-bundle\") pod \"cinder-db-sync-g9jf5\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " pod="openstack/cinder-db-sync-g9jf5" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.265020 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h72dj\" (UniqueName: \"kubernetes.io/projected/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-kube-api-access-h72dj\") pod \"cinder-db-sync-g9jf5\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " pod="openstack/cinder-db-sync-g9jf5" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.265035 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-scripts\") pod \"cinder-db-sync-g9jf5\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " pod="openstack/cinder-db-sync-g9jf5" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.265072 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-config-data\") pod \"cinder-db-sync-g9jf5\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " pod="openstack/cinder-db-sync-g9jf5" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.272488 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-config-data\") pod \"cinder-db-sync-g9jf5\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " pod="openstack/cinder-db-sync-g9jf5" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.272644 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-etc-machine-id\") pod \"cinder-db-sync-g9jf5\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " pod="openstack/cinder-db-sync-g9jf5" Nov 28 13:40:14 crc 
kubenswrapper[4857]: I1128 13:40:14.277263 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-db-sync-config-data\") pod \"cinder-db-sync-g9jf5\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " pod="openstack/cinder-db-sync-g9jf5" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.277330 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-sp8xb"] Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.285587 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-combined-ca-bundle\") pod \"cinder-db-sync-g9jf5\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " pod="openstack/cinder-db-sync-g9jf5" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.285606 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-scripts\") pod \"cinder-db-sync-g9jf5\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " pod="openstack/cinder-db-sync-g9jf5" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.308703 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-wqm6d" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.311242 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h72dj\" (UniqueName: \"kubernetes.io/projected/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-kube-api-access-h72dj\") pod \"cinder-db-sync-g9jf5\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " pod="openstack/cinder-db-sync-g9jf5" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.352806 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c9d85d47c-v4nhd" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.361980 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-46vxl"] Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.367647 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e603bbb8-24d6-43aa-bd0e-0039d8abc8e2-combined-ca-bundle\") pod \"neutron-db-sync-46vxl\" (UID: \"e603bbb8-24d6-43aa-bd0e-0039d8abc8e2\") " pod="openstack/neutron-db-sync-46vxl" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.367815 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-config-data\") pod \"ceilometer-0\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " pod="openstack/ceilometer-0" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.367840 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " pod="openstack/ceilometer-0" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.367889 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lvrkd\" (UniqueName: \"kubernetes.io/projected/e603bbb8-24d6-43aa-bd0e-0039d8abc8e2-kube-api-access-lvrkd\") pod \"neutron-db-sync-46vxl\" (UID: \"e603bbb8-24d6-43aa-bd0e-0039d8abc8e2\") " pod="openstack/neutron-db-sync-46vxl" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.367910 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5bnm\" (UniqueName: \"kubernetes.io/projected/779e7a51-657e-47ae-a068-3cd339cd9bb1-kube-api-access-l5bnm\") pod \"barbican-db-sync-sp8xb\" (UID: \"779e7a51-657e-47ae-a068-3cd339cd9bb1\") " pod="openstack/barbican-db-sync-sp8xb" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.367950 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd1c47cb-0f7e-42f1-824b-a6cef692c751-run-httpd\") pod \"ceilometer-0\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " pod="openstack/ceilometer-0" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.367976 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/779e7a51-657e-47ae-a068-3cd339cd9bb1-db-sync-config-data\") pod \"barbican-db-sync-sp8xb\" (UID: \"779e7a51-657e-47ae-a068-3cd339cd9bb1\") " pod="openstack/barbican-db-sync-sp8xb" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.368005 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " pod="openstack/ceilometer-0" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.368049 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/dd1c47cb-0f7e-42f1-824b-a6cef692c751-log-httpd\") pod \"ceilometer-0\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " pod="openstack/ceilometer-0" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.368078 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-279bt\" (UniqueName: \"kubernetes.io/projected/dd1c47cb-0f7e-42f1-824b-a6cef692c751-kube-api-access-279bt\") pod \"ceilometer-0\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " pod="openstack/ceilometer-0" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.368109 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-scripts\") pod \"ceilometer-0\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " pod="openstack/ceilometer-0" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.368129 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/779e7a51-657e-47ae-a068-3cd339cd9bb1-combined-ca-bundle\") pod \"barbican-db-sync-sp8xb\" (UID: \"779e7a51-657e-47ae-a068-3cd339cd9bb1\") " pod="openstack/barbican-db-sync-sp8xb" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.368146 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e603bbb8-24d6-43aa-bd0e-0039d8abc8e2-config\") pod \"neutron-db-sync-46vxl\" (UID: \"e603bbb8-24d6-43aa-bd0e-0039d8abc8e2\") " pod="openstack/neutron-db-sync-46vxl" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.405539 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-qrf4k"] Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.406627 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-qrf4k" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.406677 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-g9jf5" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.412594 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.412885 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-jffcr" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.414975 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.417396 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9d85d47c-v4nhd"] Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.436843 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-qrf4k"] Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.462946 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6ffb94d8ff-jt479"] Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.469381 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/779e7a51-657e-47ae-a068-3cd339cd9bb1-combined-ca-bundle\") pod \"barbican-db-sync-sp8xb\" (UID: \"779e7a51-657e-47ae-a068-3cd339cd9bb1\") " pod="openstack/barbican-db-sync-sp8xb" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.469420 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e603bbb8-24d6-43aa-bd0e-0039d8abc8e2-config\") pod \"neutron-db-sync-46vxl\" (UID: \"e603bbb8-24d6-43aa-bd0e-0039d8abc8e2\") " pod="openstack/neutron-db-sync-46vxl" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.469461 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e603bbb8-24d6-43aa-bd0e-0039d8abc8e2-combined-ca-bundle\") pod \"neutron-db-sync-46vxl\" (UID: \"e603bbb8-24d6-43aa-bd0e-0039d8abc8e2\") " pod="openstack/neutron-db-sync-46vxl" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.469544 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-config-data\") pod \"ceilometer-0\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " pod="openstack/ceilometer-0" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.469570 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " pod="openstack/ceilometer-0" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.469604 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lvrkd\" (UniqueName: \"kubernetes.io/projected/e603bbb8-24d6-43aa-bd0e-0039d8abc8e2-kube-api-access-lvrkd\") pod \"neutron-db-sync-46vxl\" (UID: \"e603bbb8-24d6-43aa-bd0e-0039d8abc8e2\") " pod="openstack/neutron-db-sync-46vxl" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.469629 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l5bnm\" (UniqueName: \"kubernetes.io/projected/779e7a51-657e-47ae-a068-3cd339cd9bb1-kube-api-access-l5bnm\") pod 
\"barbican-db-sync-sp8xb\" (UID: \"779e7a51-657e-47ae-a068-3cd339cd9bb1\") " pod="openstack/barbican-db-sync-sp8xb" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.469669 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd1c47cb-0f7e-42f1-824b-a6cef692c751-run-httpd\") pod \"ceilometer-0\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " pod="openstack/ceilometer-0" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.469702 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/779e7a51-657e-47ae-a068-3cd339cd9bb1-db-sync-config-data\") pod \"barbican-db-sync-sp8xb\" (UID: \"779e7a51-657e-47ae-a068-3cd339cd9bb1\") " pod="openstack/barbican-db-sync-sp8xb" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.469773 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " pod="openstack/ceilometer-0" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.469803 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd1c47cb-0f7e-42f1-824b-a6cef692c751-log-httpd\") pod \"ceilometer-0\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " pod="openstack/ceilometer-0" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.469840 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-279bt\" (UniqueName: \"kubernetes.io/projected/dd1c47cb-0f7e-42f1-824b-a6cef692c751-kube-api-access-279bt\") pod \"ceilometer-0\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " pod="openstack/ceilometer-0" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.469859 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-scripts\") pod \"ceilometer-0\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " pod="openstack/ceilometer-0" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.470070 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.474749 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd1c47cb-0f7e-42f1-824b-a6cef692c751-run-httpd\") pod \"ceilometer-0\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " pod="openstack/ceilometer-0" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.474988 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd1c47cb-0f7e-42f1-824b-a6cef692c751-log-httpd\") pod \"ceilometer-0\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " pod="openstack/ceilometer-0" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.477392 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/779e7a51-657e-47ae-a068-3cd339cd9bb1-db-sync-config-data\") pod \"barbican-db-sync-sp8xb\" (UID: \"779e7a51-657e-47ae-a068-3cd339cd9bb1\") " pod="openstack/barbican-db-sync-sp8xb" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.485867 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/779e7a51-657e-47ae-a068-3cd339cd9bb1-combined-ca-bundle\") pod \"barbican-db-sync-sp8xb\" (UID: \"779e7a51-657e-47ae-a068-3cd339cd9bb1\") " pod="openstack/barbican-db-sync-sp8xb" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.487043 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-config-data\") pod \"ceilometer-0\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " pod="openstack/ceilometer-0" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.490789 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6ffb94d8ff-jt479"] Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.493462 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/e603bbb8-24d6-43aa-bd0e-0039d8abc8e2-config\") pod \"neutron-db-sync-46vxl\" (UID: \"e603bbb8-24d6-43aa-bd0e-0039d8abc8e2\") " pod="openstack/neutron-db-sync-46vxl" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.496952 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " pod="openstack/ceilometer-0" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.508446 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-scripts\") pod \"ceilometer-0\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " pod="openstack/ceilometer-0" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.516982 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lvrkd\" (UniqueName: \"kubernetes.io/projected/e603bbb8-24d6-43aa-bd0e-0039d8abc8e2-kube-api-access-lvrkd\") pod \"neutron-db-sync-46vxl\" (UID: \"e603bbb8-24d6-43aa-bd0e-0039d8abc8e2\") " pod="openstack/neutron-db-sync-46vxl" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.519082 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-279bt\" (UniqueName: \"kubernetes.io/projected/dd1c47cb-0f7e-42f1-824b-a6cef692c751-kube-api-access-279bt\") pod \"ceilometer-0\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " pod="openstack/ceilometer-0" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.521061 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e603bbb8-24d6-43aa-bd0e-0039d8abc8e2-combined-ca-bundle\") pod \"neutron-db-sync-46vxl\" (UID: \"e603bbb8-24d6-43aa-bd0e-0039d8abc8e2\") " pod="openstack/neutron-db-sync-46vxl" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.532307 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5bnm\" (UniqueName: \"kubernetes.io/projected/779e7a51-657e-47ae-a068-3cd339cd9bb1-kube-api-access-l5bnm\") pod \"barbican-db-sync-sp8xb\" (UID: \"779e7a51-657e-47ae-a068-3cd339cd9bb1\") " pod="openstack/barbican-db-sync-sp8xb" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.519444 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " pod="openstack/ceilometer-0" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.575364 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-ovsdbserver-nb\") pod \"dnsmasq-dns-6ffb94d8ff-jt479\" (UID: \"244881e4-0702-46b1-b5f8-3c7da33c1d22\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.575427 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-config\") pod \"dnsmasq-dns-6ffb94d8ff-jt479\" (UID: \"244881e4-0702-46b1-b5f8-3c7da33c1d22\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.575451 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-scripts\") pod \"placement-db-sync-qrf4k\" (UID: \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\") " pod="openstack/placement-db-sync-qrf4k" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.575486 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-combined-ca-bundle\") pod \"placement-db-sync-qrf4k\" (UID: \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\") " pod="openstack/placement-db-sync-qrf4k" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.575512 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-config-data\") pod \"placement-db-sync-qrf4k\" (UID: \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\") " pod="openstack/placement-db-sync-qrf4k" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.575534 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhb67\" (UniqueName: 
\"kubernetes.io/projected/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-kube-api-access-xhb67\") pod \"placement-db-sync-qrf4k\" (UID: \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\") " pod="openstack/placement-db-sync-qrf4k" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.575570 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-ovsdbserver-sb\") pod \"dnsmasq-dns-6ffb94d8ff-jt479\" (UID: \"244881e4-0702-46b1-b5f8-3c7da33c1d22\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.575605 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vplfn\" (UniqueName: \"kubernetes.io/projected/244881e4-0702-46b1-b5f8-3c7da33c1d22-kube-api-access-vplfn\") pod \"dnsmasq-dns-6ffb94d8ff-jt479\" (UID: \"244881e4-0702-46b1-b5f8-3c7da33c1d22\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.575623 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-logs\") pod \"placement-db-sync-qrf4k\" (UID: \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\") " pod="openstack/placement-db-sync-qrf4k" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.575645 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-dns-svc\") pod \"dnsmasq-dns-6ffb94d8ff-jt479\" (UID: \"244881e4-0702-46b1-b5f8-3c7da33c1d22\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.671847 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.676868 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-ovsdbserver-sb\") pod \"dnsmasq-dns-6ffb94d8ff-jt479\" (UID: \"244881e4-0702-46b1-b5f8-3c7da33c1d22\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.676970 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vplfn\" (UniqueName: \"kubernetes.io/projected/244881e4-0702-46b1-b5f8-3c7da33c1d22-kube-api-access-vplfn\") pod \"dnsmasq-dns-6ffb94d8ff-jt479\" (UID: \"244881e4-0702-46b1-b5f8-3c7da33c1d22\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.676998 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-logs\") pod \"placement-db-sync-qrf4k\" (UID: \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\") " pod="openstack/placement-db-sync-qrf4k" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.677154 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-dns-svc\") pod \"dnsmasq-dns-6ffb94d8ff-jt479\" (UID: \"244881e4-0702-46b1-b5f8-3c7da33c1d22\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.677220 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-ovsdbserver-nb\") pod \"dnsmasq-dns-6ffb94d8ff-jt479\" (UID: \"244881e4-0702-46b1-b5f8-3c7da33c1d22\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.677250 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-config\") pod \"dnsmasq-dns-6ffb94d8ff-jt479\" (UID: \"244881e4-0702-46b1-b5f8-3c7da33c1d22\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.677271 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-scripts\") pod \"placement-db-sync-qrf4k\" (UID: \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\") " pod="openstack/placement-db-sync-qrf4k" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.677302 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-combined-ca-bundle\") pod \"placement-db-sync-qrf4k\" (UID: \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\") " pod="openstack/placement-db-sync-qrf4k" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.677324 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-config-data\") pod \"placement-db-sync-qrf4k\" (UID: \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\") " pod="openstack/placement-db-sync-qrf4k" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.677343 4857 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-xhb67\" (UniqueName: \"kubernetes.io/projected/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-kube-api-access-xhb67\") pod \"placement-db-sync-qrf4k\" (UID: \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\") " pod="openstack/placement-db-sync-qrf4k" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.678338 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-ovsdbserver-sb\") pod \"dnsmasq-dns-6ffb94d8ff-jt479\" (UID: \"244881e4-0702-46b1-b5f8-3c7da33c1d22\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.678449 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-logs\") pod \"placement-db-sync-qrf4k\" (UID: \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\") " pod="openstack/placement-db-sync-qrf4k" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.679015 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-config\") pod \"dnsmasq-dns-6ffb94d8ff-jt479\" (UID: \"244881e4-0702-46b1-b5f8-3c7da33c1d22\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.679896 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-dns-svc\") pod \"dnsmasq-dns-6ffb94d8ff-jt479\" (UID: \"244881e4-0702-46b1-b5f8-3c7da33c1d22\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.680043 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-ovsdbserver-nb\") pod \"dnsmasq-dns-6ffb94d8ff-jt479\" (UID: \"244881e4-0702-46b1-b5f8-3c7da33c1d22\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.683577 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-combined-ca-bundle\") pod \"placement-db-sync-qrf4k\" (UID: \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\") " pod="openstack/placement-db-sync-qrf4k" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.683904 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-config-data\") pod \"placement-db-sync-qrf4k\" (UID: \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\") " pod="openstack/placement-db-sync-qrf4k" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.684507 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-scripts\") pod \"placement-db-sync-qrf4k\" (UID: \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\") " pod="openstack/placement-db-sync-qrf4k" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.696590 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vplfn\" (UniqueName: \"kubernetes.io/projected/244881e4-0702-46b1-b5f8-3c7da33c1d22-kube-api-access-vplfn\") pod \"dnsmasq-dns-6ffb94d8ff-jt479\" (UID: 
\"244881e4-0702-46b1-b5f8-3c7da33c1d22\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.703794 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhb67\" (UniqueName: \"kubernetes.io/projected/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-kube-api-access-xhb67\") pod \"placement-db-sync-qrf4k\" (UID: \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\") " pod="openstack/placement-db-sync-qrf4k" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.751692 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-sp8xb" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.769059 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-46vxl" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.790616 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-qrf4k" Nov 28 13:40:14 crc kubenswrapper[4857]: I1128 13:40:14.817620 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" Nov 28 13:40:15 crc kubenswrapper[4857]: I1128 13:40:15.031834 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9d85d47c-v4nhd"] Nov 28 13:40:15 crc kubenswrapper[4857]: I1128 13:40:15.067860 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-wqm6d"] Nov 28 13:40:15 crc kubenswrapper[4857]: I1128 13:40:15.134464 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-g9jf5"] Nov 28 13:40:15 crc kubenswrapper[4857]: I1128 13:40:15.326597 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:40:15 crc kubenswrapper[4857]: I1128 13:40:15.582261 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-46vxl"] Nov 28 13:40:15 crc kubenswrapper[4857]: I1128 13:40:15.592047 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-sp8xb"] Nov 28 13:40:15 crc kubenswrapper[4857]: W1128 13:40:15.598671 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod779e7a51_657e_47ae_a068_3cd339cd9bb1.slice/crio-1dde1b328dd9aa7f64075ffb1b4459c3ad103546bdfd3bac9e98b01180538ef2 WatchSource:0}: Error finding container 1dde1b328dd9aa7f64075ffb1b4459c3ad103546bdfd3bac9e98b01180538ef2: Status 404 returned error can't find the container with id 1dde1b328dd9aa7f64075ffb1b4459c3ad103546bdfd3bac9e98b01180538ef2 Nov 28 13:40:15 crc kubenswrapper[4857]: I1128 13:40:15.658494 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-g9jf5" event={"ID":"d7eee1cb-c5d6-45e4-a007-0d29935cd83a","Type":"ContainerStarted","Data":"94ad7b5192c1cf2720a9a41e7e9875b0b4e3d8db356ff52e4e74b8cd0af516b7"} Nov 28 13:40:15 crc kubenswrapper[4857]: I1128 13:40:15.661892 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-sp8xb" event={"ID":"779e7a51-657e-47ae-a068-3cd339cd9bb1","Type":"ContainerStarted","Data":"1dde1b328dd9aa7f64075ffb1b4459c3ad103546bdfd3bac9e98b01180538ef2"} Nov 28 13:40:15 crc kubenswrapper[4857]: I1128 13:40:15.667021 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerStarted","Data":"7c06071c7ab94c9d389a4416468a8dae45a8d25bf695e4f1589bbb97f67b9ff8"} Nov 28 13:40:15 crc kubenswrapper[4857]: I1128 13:40:15.667050 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerStarted","Data":"ac8cee2bfbd683e0bf23daa4541d27abead299ecd058365d22a491cf7e370d73"} Nov 28 13:40:15 crc kubenswrapper[4857]: I1128 13:40:15.670799 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dd1c47cb-0f7e-42f1-824b-a6cef692c751","Type":"ContainerStarted","Data":"e704e27bb3e276011833d42591807b7d40f9312c9b8dfc5c46853c9021dcf99b"} Nov 28 13:40:15 crc kubenswrapper[4857]: I1128 13:40:15.679516 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9d85d47c-v4nhd" event={"ID":"51aad21c-4090-40b1-8433-eb79d6d01a68","Type":"ContainerStarted","Data":"4b0e3b609d4a1832ba4807b3c31b1dadc8b4a6d2ff372fb27402ec3d7e70f6b4"} Nov 28 13:40:15 crc kubenswrapper[4857]: I1128 13:40:15.681717 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-dwjz2" event={"ID":"574e8323-bfa6-4c1d-9a87-53f09671c900","Type":"ContainerStarted","Data":"e46dd6d35ad547e09a4579d4f034ecbb1cec046bf81e0cef193422a55414ebac"} Nov 28 13:40:15 crc kubenswrapper[4857]: I1128 13:40:15.683875 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-wqm6d" event={"ID":"db417385-2d5f-42e0-97de-822e20e9e6ed","Type":"ContainerStarted","Data":"0b24f031f6c902bb9c06cf392171d2f2a7d9b72dcff68c732a3ba8173c331a6f"} Nov 28 13:40:15 crc kubenswrapper[4857]: I1128 13:40:15.687860 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-46vxl" event={"ID":"e603bbb8-24d6-43aa-bd0e-0039d8abc8e2","Type":"ContainerStarted","Data":"2c49d4010da0b265dc6443ec1cc0bac74e91be487dbdf984881b7bcf0cb3c3ff"} Nov 28 13:40:15 crc kubenswrapper[4857]: I1128 13:40:15.708373 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6ffb94d8ff-jt479"] Nov 28 13:40:15 crc kubenswrapper[4857]: I1128 13:40:15.716472 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-qrf4k"] Nov 28 13:40:15 crc kubenswrapper[4857]: I1128 13:40:15.733289 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-dwjz2" podStartSLOduration=3.408767196 podStartE2EDuration="35.733268326s" podCreationTimestamp="2025-11-28 13:39:40 +0000 UTC" firstStartedPulling="2025-11-28 13:39:41.493347159 +0000 UTC m=+1273.520722316" lastFinishedPulling="2025-11-28 13:40:13.817848259 +0000 UTC m=+1305.845223446" observedRunningTime="2025-11-28 13:40:15.704526324 +0000 UTC m=+1307.731901511" watchObservedRunningTime="2025-11-28 13:40:15.733268326 +0000 UTC m=+1307.760643493" Nov 28 13:40:16 crc kubenswrapper[4857]: I1128 13:40:16.701820 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-wqm6d" event={"ID":"db417385-2d5f-42e0-97de-822e20e9e6ed","Type":"ContainerStarted","Data":"a1a46ab9fdee200682dc389948aee53258b76c2adf51df8c6a05643fd6fc557e"} Nov 28 13:40:16 crc kubenswrapper[4857]: I1128 13:40:16.705175 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-46vxl" event={"ID":"e603bbb8-24d6-43aa-bd0e-0039d8abc8e2","Type":"ContainerStarted","Data":"0878bd25d4cba44dddd8101e2ea744174f24a930321fa1c902f705f1860a22f1"} 
Nov 28 13:40:16 crc kubenswrapper[4857]: I1128 13:40:16.726970 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-wqm6d" podStartSLOduration=3.726952996 podStartE2EDuration="3.726952996s" podCreationTimestamp="2025-11-28 13:40:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:40:16.722731713 +0000 UTC m=+1308.750106880" watchObservedRunningTime="2025-11-28 13:40:16.726952996 +0000 UTC m=+1308.754328163" Nov 28 13:40:16 crc kubenswrapper[4857]: I1128 13:40:16.737131 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerStarted","Data":"f4326c863e90d24d084dbd8c33e41f8c3206bed85eb21a3ea86b5f28906b546e"} Nov 28 13:40:16 crc kubenswrapper[4857]: I1128 13:40:16.737171 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerStarted","Data":"a2fc2baa7d0114402b84cd8afc19e3b6af384d6ec48988a881766a0605e4b9fa"} Nov 28 13:40:16 crc kubenswrapper[4857]: I1128 13:40:16.740866 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-qrf4k" event={"ID":"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d","Type":"ContainerStarted","Data":"1d3a26da999455a7aae8160140baaf862e44d1a36c2d9eecef58dc5c0d7e7f3a"} Nov 28 13:40:16 crc kubenswrapper[4857]: I1128 13:40:16.742906 4857 generic.go:334] "Generic (PLEG): container finished" podID="244881e4-0702-46b1-b5f8-3c7da33c1d22" containerID="55ef976863211cfbc8fa9d674a0e5a95c26e6145deab01a2924a0507165b1273" exitCode=0 Nov 28 13:40:16 crc kubenswrapper[4857]: I1128 13:40:16.742960 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" event={"ID":"244881e4-0702-46b1-b5f8-3c7da33c1d22","Type":"ContainerDied","Data":"55ef976863211cfbc8fa9d674a0e5a95c26e6145deab01a2924a0507165b1273"} Nov 28 13:40:16 crc kubenswrapper[4857]: I1128 13:40:16.742979 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" event={"ID":"244881e4-0702-46b1-b5f8-3c7da33c1d22","Type":"ContainerStarted","Data":"89b4ccfeda7919704e04de45f52c535bb729a7e3d8cf55a974ccd9ac53cf9cac"} Nov 28 13:40:16 crc kubenswrapper[4857]: I1128 13:40:16.744680 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-46vxl" podStartSLOduration=2.7446579079999998 podStartE2EDuration="2.744657908s" podCreationTimestamp="2025-11-28 13:40:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:40:16.739867749 +0000 UTC m=+1308.767242916" watchObservedRunningTime="2025-11-28 13:40:16.744657908 +0000 UTC m=+1308.772033075" Nov 28 13:40:16 crc kubenswrapper[4857]: I1128 13:40:16.748577 4857 generic.go:334] "Generic (PLEG): container finished" podID="51aad21c-4090-40b1-8433-eb79d6d01a68" containerID="3ca7b9efd5574545e1581b09f5d2d05af8b2e40ee263f4eaa68c97339916b319" exitCode=0 Nov 28 13:40:16 crc kubenswrapper[4857]: I1128 13:40:16.748616 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9d85d47c-v4nhd" event={"ID":"51aad21c-4090-40b1-8433-eb79d6d01a68","Type":"ContainerDied","Data":"3ca7b9efd5574545e1581b09f5d2d05af8b2e40ee263f4eaa68c97339916b319"} Nov 28 13:40:16 crc kubenswrapper[4857]: I1128 
13:40:16.780102 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=21.157206704 podStartE2EDuration="50.780085693s" podCreationTimestamp="2025-11-28 13:39:26 +0000 UTC" firstStartedPulling="2025-11-28 13:39:44.48934302 +0000 UTC m=+1276.516718187" lastFinishedPulling="2025-11-28 13:40:14.112222009 +0000 UTC m=+1306.139597176" observedRunningTime="2025-11-28 13:40:16.775441499 +0000 UTC m=+1308.802816666" watchObservedRunningTime="2025-11-28 13:40:16.780085693 +0000 UTC m=+1308.807460860" Nov 28 13:40:16 crc kubenswrapper[4857]: I1128 13:40:16.945319 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.206734 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6ffb94d8ff-jt479"] Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.248394 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-kn9cb"] Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.250193 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.252955 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.262281 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-kn9cb"] Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.266654 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9d85d47c-v4nhd" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.346425 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4cwr5\" (UniqueName: \"kubernetes.io/projected/51aad21c-4090-40b1-8433-eb79d6d01a68-kube-api-access-4cwr5\") pod \"51aad21c-4090-40b1-8433-eb79d6d01a68\" (UID: \"51aad21c-4090-40b1-8433-eb79d6d01a68\") " Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.347014 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-ovsdbserver-nb\") pod \"51aad21c-4090-40b1-8433-eb79d6d01a68\" (UID: \"51aad21c-4090-40b1-8433-eb79d6d01a68\") " Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.347152 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-dns-svc\") pod \"51aad21c-4090-40b1-8433-eb79d6d01a68\" (UID: \"51aad21c-4090-40b1-8433-eb79d6d01a68\") " Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.347268 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-config\") pod \"51aad21c-4090-40b1-8433-eb79d6d01a68\" (UID: \"51aad21c-4090-40b1-8433-eb79d6d01a68\") " Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.347312 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-ovsdbserver-sb\") pod \"51aad21c-4090-40b1-8433-eb79d6d01a68\" (UID: \"51aad21c-4090-40b1-8433-eb79d6d01a68\") " Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 
13:40:17.347852 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-dns-svc\") pod \"dnsmasq-dns-cf78879c9-kn9cb\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.347881 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ps8vt\" (UniqueName: \"kubernetes.io/projected/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-kube-api-access-ps8vt\") pod \"dnsmasq-dns-cf78879c9-kn9cb\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.347935 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-ovsdbserver-sb\") pod \"dnsmasq-dns-cf78879c9-kn9cb\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.348003 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-ovsdbserver-nb\") pod \"dnsmasq-dns-cf78879c9-kn9cb\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.348059 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-dns-swift-storage-0\") pod \"dnsmasq-dns-cf78879c9-kn9cb\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.348086 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-config\") pod \"dnsmasq-dns-cf78879c9-kn9cb\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.377160 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51aad21c-4090-40b1-8433-eb79d6d01a68-kube-api-access-4cwr5" (OuterVolumeSpecName: "kube-api-access-4cwr5") pod "51aad21c-4090-40b1-8433-eb79d6d01a68" (UID: "51aad21c-4090-40b1-8433-eb79d6d01a68"). InnerVolumeSpecName "kube-api-access-4cwr5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.383898 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "51aad21c-4090-40b1-8433-eb79d6d01a68" (UID: "51aad21c-4090-40b1-8433-eb79d6d01a68"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.390607 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "51aad21c-4090-40b1-8433-eb79d6d01a68" (UID: "51aad21c-4090-40b1-8433-eb79d6d01a68"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.435559 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-config" (OuterVolumeSpecName: "config") pod "51aad21c-4090-40b1-8433-eb79d6d01a68" (UID: "51aad21c-4090-40b1-8433-eb79d6d01a68"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.443139 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "51aad21c-4090-40b1-8433-eb79d6d01a68" (UID: "51aad21c-4090-40b1-8433-eb79d6d01a68"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.449627 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-dns-svc\") pod \"dnsmasq-dns-cf78879c9-kn9cb\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.449670 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ps8vt\" (UniqueName: \"kubernetes.io/projected/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-kube-api-access-ps8vt\") pod \"dnsmasq-dns-cf78879c9-kn9cb\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.449712 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-ovsdbserver-sb\") pod \"dnsmasq-dns-cf78879c9-kn9cb\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.449798 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-ovsdbserver-nb\") pod \"dnsmasq-dns-cf78879c9-kn9cb\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.449861 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-dns-swift-storage-0\") pod \"dnsmasq-dns-cf78879c9-kn9cb\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.449880 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-config\") pod 
\"dnsmasq-dns-cf78879c9-kn9cb\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.449925 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.449937 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.449945 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.449954 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/51aad21c-4090-40b1-8433-eb79d6d01a68-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.449962 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4cwr5\" (UniqueName: \"kubernetes.io/projected/51aad21c-4090-40b1-8433-eb79d6d01a68-kube-api-access-4cwr5\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.450716 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-dns-svc\") pod \"dnsmasq-dns-cf78879c9-kn9cb\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.452065 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-config\") pod \"dnsmasq-dns-cf78879c9-kn9cb\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.452558 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-ovsdbserver-nb\") pod \"dnsmasq-dns-cf78879c9-kn9cb\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.453110 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-ovsdbserver-sb\") pod \"dnsmasq-dns-cf78879c9-kn9cb\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.453240 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-dns-swift-storage-0\") pod \"dnsmasq-dns-cf78879c9-kn9cb\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.472736 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ps8vt\" (UniqueName: 
\"kubernetes.io/projected/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-kube-api-access-ps8vt\") pod \"dnsmasq-dns-cf78879c9-kn9cb\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.577873 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.774714 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" event={"ID":"244881e4-0702-46b1-b5f8-3c7da33c1d22","Type":"ContainerStarted","Data":"9c78d13a6391565f83e356a111a1e5fa94d38a46a713a7dc2c0acc33b05fec9b"} Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.775035 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" podUID="244881e4-0702-46b1-b5f8-3c7da33c1d22" containerName="dnsmasq-dns" containerID="cri-o://9c78d13a6391565f83e356a111a1e5fa94d38a46a713a7dc2c0acc33b05fec9b" gracePeriod=10 Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.775323 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.786809 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9d85d47c-v4nhd" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.786910 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9d85d47c-v4nhd" event={"ID":"51aad21c-4090-40b1-8433-eb79d6d01a68","Type":"ContainerDied","Data":"4b0e3b609d4a1832ba4807b3c31b1dadc8b4a6d2ff372fb27402ec3d7e70f6b4"} Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.786967 4857 scope.go:117] "RemoveContainer" containerID="3ca7b9efd5574545e1581b09f5d2d05af8b2e40ee263f4eaa68c97339916b319" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.802398 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" podStartSLOduration=3.80238207 podStartE2EDuration="3.80238207s" podCreationTimestamp="2025-11-28 13:40:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:40:17.802342839 +0000 UTC m=+1309.829718006" watchObservedRunningTime="2025-11-28 13:40:17.80238207 +0000 UTC m=+1309.829757237" Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.886244 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9d85d47c-v4nhd"] Nov 28 13:40:17 crc kubenswrapper[4857]: I1128 13:40:17.907120 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c9d85d47c-v4nhd"] Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.100114 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-kn9cb"] Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.329056 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51aad21c-4090-40b1-8433-eb79d6d01a68" path="/var/lib/kubelet/pods/51aad21c-4090-40b1-8433-eb79d6d01a68/volumes" Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.466788 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.575372 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-dns-svc\") pod \"244881e4-0702-46b1-b5f8-3c7da33c1d22\" (UID: \"244881e4-0702-46b1-b5f8-3c7da33c1d22\") " Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.575428 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-config\") pod \"244881e4-0702-46b1-b5f8-3c7da33c1d22\" (UID: \"244881e4-0702-46b1-b5f8-3c7da33c1d22\") " Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.575588 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-ovsdbserver-sb\") pod \"244881e4-0702-46b1-b5f8-3c7da33c1d22\" (UID: \"244881e4-0702-46b1-b5f8-3c7da33c1d22\") " Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.575609 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-ovsdbserver-nb\") pod \"244881e4-0702-46b1-b5f8-3c7da33c1d22\" (UID: \"244881e4-0702-46b1-b5f8-3c7da33c1d22\") " Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.575679 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vplfn\" (UniqueName: \"kubernetes.io/projected/244881e4-0702-46b1-b5f8-3c7da33c1d22-kube-api-access-vplfn\") pod \"244881e4-0702-46b1-b5f8-3c7da33c1d22\" (UID: \"244881e4-0702-46b1-b5f8-3c7da33c1d22\") " Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.581693 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/244881e4-0702-46b1-b5f8-3c7da33c1d22-kube-api-access-vplfn" (OuterVolumeSpecName: "kube-api-access-vplfn") pod "244881e4-0702-46b1-b5f8-3c7da33c1d22" (UID: "244881e4-0702-46b1-b5f8-3c7da33c1d22"). InnerVolumeSpecName "kube-api-access-vplfn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.635255 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "244881e4-0702-46b1-b5f8-3c7da33c1d22" (UID: "244881e4-0702-46b1-b5f8-3c7da33c1d22"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.638966 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-config" (OuterVolumeSpecName: "config") pod "244881e4-0702-46b1-b5f8-3c7da33c1d22" (UID: "244881e4-0702-46b1-b5f8-3c7da33c1d22"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.639117 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "244881e4-0702-46b1-b5f8-3c7da33c1d22" (UID: "244881e4-0702-46b1-b5f8-3c7da33c1d22"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.668641 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "244881e4-0702-46b1-b5f8-3c7da33c1d22" (UID: "244881e4-0702-46b1-b5f8-3c7da33c1d22"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.679657 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.679978 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.679999 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vplfn\" (UniqueName: \"kubernetes.io/projected/244881e4-0702-46b1-b5f8-3c7da33c1d22-kube-api-access-vplfn\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.680010 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.680020 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/244881e4-0702-46b1-b5f8-3c7da33c1d22-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.796508 4857 generic.go:334] "Generic (PLEG): container finished" podID="244881e4-0702-46b1-b5f8-3c7da33c1d22" containerID="9c78d13a6391565f83e356a111a1e5fa94d38a46a713a7dc2c0acc33b05fec9b" exitCode=0 Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.796656 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" event={"ID":"244881e4-0702-46b1-b5f8-3c7da33c1d22","Type":"ContainerDied","Data":"9c78d13a6391565f83e356a111a1e5fa94d38a46a713a7dc2c0acc33b05fec9b"} Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.797628 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" event={"ID":"244881e4-0702-46b1-b5f8-3c7da33c1d22","Type":"ContainerDied","Data":"89b4ccfeda7919704e04de45f52c535bb729a7e3d8cf55a974ccd9ac53cf9cac"} Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.797699 4857 scope.go:117] "RemoveContainer" containerID="9c78d13a6391565f83e356a111a1e5fa94d38a46a713a7dc2c0acc33b05fec9b" Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.796742 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6ffb94d8ff-jt479" Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.803578 4857 generic.go:334] "Generic (PLEG): container finished" podID="992a1ed2-a6f4-44c2-9cb9-73857ba4d53a" containerID="91ef46368263fb9e62d46672b1da81143900e54aa73f82abdc30a98f231042d2" exitCode=0 Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.803613 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" event={"ID":"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a","Type":"ContainerDied","Data":"91ef46368263fb9e62d46672b1da81143900e54aa73f82abdc30a98f231042d2"} Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.803637 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" event={"ID":"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a","Type":"ContainerStarted","Data":"d1c3b9b287bc84a3c9dc9bd5fcaac4ad1add17a6f5d6ca9dcc088bbba16e5936"} Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.835347 4857 scope.go:117] "RemoveContainer" containerID="55ef976863211cfbc8fa9d674a0e5a95c26e6145deab01a2924a0507165b1273" Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.852389 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6ffb94d8ff-jt479"] Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.867070 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6ffb94d8ff-jt479"] Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.870851 4857 scope.go:117] "RemoveContainer" containerID="9c78d13a6391565f83e356a111a1e5fa94d38a46a713a7dc2c0acc33b05fec9b" Nov 28 13:40:18 crc kubenswrapper[4857]: E1128 13:40:18.872246 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c78d13a6391565f83e356a111a1e5fa94d38a46a713a7dc2c0acc33b05fec9b\": container with ID starting with 9c78d13a6391565f83e356a111a1e5fa94d38a46a713a7dc2c0acc33b05fec9b not found: ID does not exist" containerID="9c78d13a6391565f83e356a111a1e5fa94d38a46a713a7dc2c0acc33b05fec9b" Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.872294 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c78d13a6391565f83e356a111a1e5fa94d38a46a713a7dc2c0acc33b05fec9b"} err="failed to get container status \"9c78d13a6391565f83e356a111a1e5fa94d38a46a713a7dc2c0acc33b05fec9b\": rpc error: code = NotFound desc = could not find container \"9c78d13a6391565f83e356a111a1e5fa94d38a46a713a7dc2c0acc33b05fec9b\": container with ID starting with 9c78d13a6391565f83e356a111a1e5fa94d38a46a713a7dc2c0acc33b05fec9b not found: ID does not exist" Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.872320 4857 scope.go:117] "RemoveContainer" containerID="55ef976863211cfbc8fa9d674a0e5a95c26e6145deab01a2924a0507165b1273" Nov 28 13:40:18 crc kubenswrapper[4857]: E1128 13:40:18.872646 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55ef976863211cfbc8fa9d674a0e5a95c26e6145deab01a2924a0507165b1273\": container with ID starting with 55ef976863211cfbc8fa9d674a0e5a95c26e6145deab01a2924a0507165b1273 not found: ID does not exist" containerID="55ef976863211cfbc8fa9d674a0e5a95c26e6145deab01a2924a0507165b1273" Nov 28 13:40:18 crc kubenswrapper[4857]: I1128 13:40:18.872673 4857 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"55ef976863211cfbc8fa9d674a0e5a95c26e6145deab01a2924a0507165b1273"} err="failed to get container status \"55ef976863211cfbc8fa9d674a0e5a95c26e6145deab01a2924a0507165b1273\": rpc error: code = NotFound desc = could not find container \"55ef976863211cfbc8fa9d674a0e5a95c26e6145deab01a2924a0507165b1273\": container with ID starting with 55ef976863211cfbc8fa9d674a0e5a95c26e6145deab01a2924a0507165b1273 not found: ID does not exist" Nov 28 13:40:19 crc kubenswrapper[4857]: I1128 13:40:19.821871 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" event={"ID":"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a","Type":"ContainerStarted","Data":"a25de7ed9c22fbbc2ed87a5ab2c3b5470a0250a9afdd348f295f0cfad5fe9cc9"} Nov 28 13:40:19 crc kubenswrapper[4857]: I1128 13:40:19.822238 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:40:19 crc kubenswrapper[4857]: I1128 13:40:19.842526 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" podStartSLOduration=2.842509957 podStartE2EDuration="2.842509957s" podCreationTimestamp="2025-11-28 13:40:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:40:19.838438669 +0000 UTC m=+1311.865813856" watchObservedRunningTime="2025-11-28 13:40:19.842509957 +0000 UTC m=+1311.869885124" Nov 28 13:40:20 crc kubenswrapper[4857]: I1128 13:40:20.328585 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="244881e4-0702-46b1-b5f8-3c7da33c1d22" path="/var/lib/kubelet/pods/244881e4-0702-46b1-b5f8-3c7da33c1d22/volumes" Nov 28 13:40:20 crc kubenswrapper[4857]: I1128 13:40:20.836069 4857 generic.go:334] "Generic (PLEG): container finished" podID="db417385-2d5f-42e0-97de-822e20e9e6ed" containerID="a1a46ab9fdee200682dc389948aee53258b76c2adf51df8c6a05643fd6fc557e" exitCode=0 Nov 28 13:40:20 crc kubenswrapper[4857]: I1128 13:40:20.836187 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-wqm6d" event={"ID":"db417385-2d5f-42e0-97de-822e20e9e6ed","Type":"ContainerDied","Data":"a1a46ab9fdee200682dc389948aee53258b76c2adf51df8c6a05643fd6fc557e"} Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.784388 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-wqm6d" Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.853514 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-scripts\") pod \"db417385-2d5f-42e0-97de-822e20e9e6ed\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.853585 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-fernet-keys\") pod \"db417385-2d5f-42e0-97de-822e20e9e6ed\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.853608 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-credential-keys\") pod \"db417385-2d5f-42e0-97de-822e20e9e6ed\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.853659 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-combined-ca-bundle\") pod \"db417385-2d5f-42e0-97de-822e20e9e6ed\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.853783 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-config-data\") pod \"db417385-2d5f-42e0-97de-822e20e9e6ed\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.853800 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jggh4\" (UniqueName: \"kubernetes.io/projected/db417385-2d5f-42e0-97de-822e20e9e6ed-kube-api-access-jggh4\") pod \"db417385-2d5f-42e0-97de-822e20e9e6ed\" (UID: \"db417385-2d5f-42e0-97de-822e20e9e6ed\") " Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.860147 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-wqm6d" event={"ID":"db417385-2d5f-42e0-97de-822e20e9e6ed","Type":"ContainerDied","Data":"0b24f031f6c902bb9c06cf392171d2f2a7d9b72dcff68c732a3ba8173c331a6f"} Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.860199 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0b24f031f6c902bb9c06cf392171d2f2a7d9b72dcff68c732a3ba8173c331a6f" Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.860273 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-wqm6d" Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.863901 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db417385-2d5f-42e0-97de-822e20e9e6ed-kube-api-access-jggh4" (OuterVolumeSpecName: "kube-api-access-jggh4") pod "db417385-2d5f-42e0-97de-822e20e9e6ed" (UID: "db417385-2d5f-42e0-97de-822e20e9e6ed"). InnerVolumeSpecName "kube-api-access-jggh4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.863998 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "db417385-2d5f-42e0-97de-822e20e9e6ed" (UID: "db417385-2d5f-42e0-97de-822e20e9e6ed"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.867236 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "db417385-2d5f-42e0-97de-822e20e9e6ed" (UID: "db417385-2d5f-42e0-97de-822e20e9e6ed"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.882299 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-scripts" (OuterVolumeSpecName: "scripts") pod "db417385-2d5f-42e0-97de-822e20e9e6ed" (UID: "db417385-2d5f-42e0-97de-822e20e9e6ed"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.903120 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "db417385-2d5f-42e0-97de-822e20e9e6ed" (UID: "db417385-2d5f-42e0-97de-822e20e9e6ed"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.904600 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-config-data" (OuterVolumeSpecName: "config-data") pod "db417385-2d5f-42e0-97de-822e20e9e6ed" (UID: "db417385-2d5f-42e0-97de-822e20e9e6ed"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.949088 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-wqm6d"] Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.955208 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.955241 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jggh4\" (UniqueName: \"kubernetes.io/projected/db417385-2d5f-42e0-97de-822e20e9e6ed-kube-api-access-jggh4\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.955255 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.955267 4857 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.955277 4857 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.955287 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db417385-2d5f-42e0-97de-822e20e9e6ed-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:22 crc kubenswrapper[4857]: I1128 13:40:22.965997 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-wqm6d"] Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.035190 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-g2shl"] Nov 28 13:40:23 crc kubenswrapper[4857]: E1128 13:40:23.035609 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="244881e4-0702-46b1-b5f8-3c7da33c1d22" containerName="init" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.035621 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="244881e4-0702-46b1-b5f8-3c7da33c1d22" containerName="init" Nov 28 13:40:23 crc kubenswrapper[4857]: E1128 13:40:23.035632 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="244881e4-0702-46b1-b5f8-3c7da33c1d22" containerName="dnsmasq-dns" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.035638 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="244881e4-0702-46b1-b5f8-3c7da33c1d22" containerName="dnsmasq-dns" Nov 28 13:40:23 crc kubenswrapper[4857]: E1128 13:40:23.035651 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51aad21c-4090-40b1-8433-eb79d6d01a68" containerName="init" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.035658 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="51aad21c-4090-40b1-8433-eb79d6d01a68" containerName="init" Nov 28 13:40:23 crc kubenswrapper[4857]: E1128 13:40:23.035674 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db417385-2d5f-42e0-97de-822e20e9e6ed" containerName="keystone-bootstrap" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.035680 4857 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="db417385-2d5f-42e0-97de-822e20e9e6ed" containerName="keystone-bootstrap" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.035895 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="db417385-2d5f-42e0-97de-822e20e9e6ed" containerName="keystone-bootstrap" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.035914 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="244881e4-0702-46b1-b5f8-3c7da33c1d22" containerName="dnsmasq-dns" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.035921 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="51aad21c-4090-40b1-8433-eb79d6d01a68" containerName="init" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.036492 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-g2shl" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.045050 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-g2shl"] Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.056934 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-scripts\") pod \"keystone-bootstrap-g2shl\" (UID: \"96846d9c-1949-4655-be98-006b4e5dd154\") " pod="openstack/keystone-bootstrap-g2shl" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.057214 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-fernet-keys\") pod \"keystone-bootstrap-g2shl\" (UID: \"96846d9c-1949-4655-be98-006b4e5dd154\") " pod="openstack/keystone-bootstrap-g2shl" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.057348 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-credential-keys\") pod \"keystone-bootstrap-g2shl\" (UID: \"96846d9c-1949-4655-be98-006b4e5dd154\") " pod="openstack/keystone-bootstrap-g2shl" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.057483 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-combined-ca-bundle\") pod \"keystone-bootstrap-g2shl\" (UID: \"96846d9c-1949-4655-be98-006b4e5dd154\") " pod="openstack/keystone-bootstrap-g2shl" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.057740 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-config-data\") pod \"keystone-bootstrap-g2shl\" (UID: \"96846d9c-1949-4655-be98-006b4e5dd154\") " pod="openstack/keystone-bootstrap-g2shl" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.057831 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pf8dv\" (UniqueName: \"kubernetes.io/projected/96846d9c-1949-4655-be98-006b4e5dd154-kube-api-access-pf8dv\") pod \"keystone-bootstrap-g2shl\" (UID: \"96846d9c-1949-4655-be98-006b4e5dd154\") " pod="openstack/keystone-bootstrap-g2shl" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.159207 4857 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-fernet-keys\") pod \"keystone-bootstrap-g2shl\" (UID: \"96846d9c-1949-4655-be98-006b4e5dd154\") " pod="openstack/keystone-bootstrap-g2shl" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.159282 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-credential-keys\") pod \"keystone-bootstrap-g2shl\" (UID: \"96846d9c-1949-4655-be98-006b4e5dd154\") " pod="openstack/keystone-bootstrap-g2shl" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.159347 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-combined-ca-bundle\") pod \"keystone-bootstrap-g2shl\" (UID: \"96846d9c-1949-4655-be98-006b4e5dd154\") " pod="openstack/keystone-bootstrap-g2shl" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.159418 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-config-data\") pod \"keystone-bootstrap-g2shl\" (UID: \"96846d9c-1949-4655-be98-006b4e5dd154\") " pod="openstack/keystone-bootstrap-g2shl" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.159450 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pf8dv\" (UniqueName: \"kubernetes.io/projected/96846d9c-1949-4655-be98-006b4e5dd154-kube-api-access-pf8dv\") pod \"keystone-bootstrap-g2shl\" (UID: \"96846d9c-1949-4655-be98-006b4e5dd154\") " pod="openstack/keystone-bootstrap-g2shl" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.159515 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-scripts\") pod \"keystone-bootstrap-g2shl\" (UID: \"96846d9c-1949-4655-be98-006b4e5dd154\") " pod="openstack/keystone-bootstrap-g2shl" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.164595 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-credential-keys\") pod \"keystone-bootstrap-g2shl\" (UID: \"96846d9c-1949-4655-be98-006b4e5dd154\") " pod="openstack/keystone-bootstrap-g2shl" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.164626 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-fernet-keys\") pod \"keystone-bootstrap-g2shl\" (UID: \"96846d9c-1949-4655-be98-006b4e5dd154\") " pod="openstack/keystone-bootstrap-g2shl" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.165099 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-scripts\") pod \"keystone-bootstrap-g2shl\" (UID: \"96846d9c-1949-4655-be98-006b4e5dd154\") " pod="openstack/keystone-bootstrap-g2shl" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.169055 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-combined-ca-bundle\") pod \"keystone-bootstrap-g2shl\" (UID: 
\"96846d9c-1949-4655-be98-006b4e5dd154\") " pod="openstack/keystone-bootstrap-g2shl" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.170910 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-config-data\") pod \"keystone-bootstrap-g2shl\" (UID: \"96846d9c-1949-4655-be98-006b4e5dd154\") " pod="openstack/keystone-bootstrap-g2shl" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.181320 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pf8dv\" (UniqueName: \"kubernetes.io/projected/96846d9c-1949-4655-be98-006b4e5dd154-kube-api-access-pf8dv\") pod \"keystone-bootstrap-g2shl\" (UID: \"96846d9c-1949-4655-be98-006b4e5dd154\") " pod="openstack/keystone-bootstrap-g2shl" Nov 28 13:40:23 crc kubenswrapper[4857]: I1128 13:40:23.361383 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-g2shl" Nov 28 13:40:24 crc kubenswrapper[4857]: I1128 13:40:24.321588 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db417385-2d5f-42e0-97de-822e20e9e6ed" path="/var/lib/kubelet/pods/db417385-2d5f-42e0-97de-822e20e9e6ed/volumes" Nov 28 13:40:27 crc kubenswrapper[4857]: I1128 13:40:27.579966 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:40:27 crc kubenswrapper[4857]: I1128 13:40:27.639303 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-ngqsk"] Nov 28 13:40:27 crc kubenswrapper[4857]: I1128 13:40:27.639594 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk" podUID="c61bf456-25ac-453e-87cf-e0694d637c22" containerName="dnsmasq-dns" containerID="cri-o://a52115381f0e1bb2ee8bc7978d1269438baa8c3a8f4f36bf04bebd70ee0ffa32" gracePeriod=10 Nov 28 13:40:28 crc kubenswrapper[4857]: I1128 13:40:28.930380 4857 generic.go:334] "Generic (PLEG): container finished" podID="c61bf456-25ac-453e-87cf-e0694d637c22" containerID="a52115381f0e1bb2ee8bc7978d1269438baa8c3a8f4f36bf04bebd70ee0ffa32" exitCode=0 Nov 28 13:40:28 crc kubenswrapper[4857]: I1128 13:40:28.930441 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk" event={"ID":"c61bf456-25ac-453e-87cf-e0694d637c22","Type":"ContainerDied","Data":"a52115381f0e1bb2ee8bc7978d1269438baa8c3a8f4f36bf04bebd70ee0ffa32"} Nov 28 13:40:31 crc kubenswrapper[4857]: I1128 13:40:31.977013 4857 generic.go:334] "Generic (PLEG): container finished" podID="574e8323-bfa6-4c1d-9a87-53f09671c900" containerID="e46dd6d35ad547e09a4579d4f034ecbb1cec046bf81e0cef193422a55414ebac" exitCode=0 Nov 28 13:40:31 crc kubenswrapper[4857]: I1128 13:40:31.977149 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-dwjz2" event={"ID":"574e8323-bfa6-4c1d-9a87-53f09671c900","Type":"ContainerDied","Data":"e46dd6d35ad547e09a4579d4f034ecbb1cec046bf81e0cef193422a55414ebac"} Nov 28 13:40:32 crc kubenswrapper[4857]: I1128 13:40:32.179840 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk" podUID="c61bf456-25ac-453e-87cf-e0694d637c22" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.117:5353: connect: connection refused" Nov 28 13:40:33 crc kubenswrapper[4857]: I1128 13:40:33.178259 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls 
container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:40:33 crc kubenswrapper[4857]: I1128 13:40:33.178590 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:40:36 crc kubenswrapper[4857]: E1128 13:40:36.062972 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Nov 28 13:40:36 crc kubenswrapper[4857]: E1128 13:40:36.063365 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l5bnm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-sp8xb_openstack(779e7a51-657e-47ae-a068-3cd339cd9bb1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:40:36 crc kubenswrapper[4857]: E1128 13:40:36.064906 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-sp8xb" podUID="779e7a51-657e-47ae-a068-3cd339cd9bb1" Nov 28 13:40:36 crc kubenswrapper[4857]: I1128 13:40:36.131398 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-dwjz2" Nov 28 13:40:36 crc kubenswrapper[4857]: I1128 13:40:36.176396 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/574e8323-bfa6-4c1d-9a87-53f09671c900-combined-ca-bundle\") pod \"574e8323-bfa6-4c1d-9a87-53f09671c900\" (UID: \"574e8323-bfa6-4c1d-9a87-53f09671c900\") " Nov 28 13:40:36 crc kubenswrapper[4857]: I1128 13:40:36.176482 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/574e8323-bfa6-4c1d-9a87-53f09671c900-db-sync-config-data\") pod \"574e8323-bfa6-4c1d-9a87-53f09671c900\" (UID: \"574e8323-bfa6-4c1d-9a87-53f09671c900\") " Nov 28 13:40:36 crc kubenswrapper[4857]: I1128 13:40:36.176514 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/574e8323-bfa6-4c1d-9a87-53f09671c900-config-data\") pod \"574e8323-bfa6-4c1d-9a87-53f09671c900\" (UID: \"574e8323-bfa6-4c1d-9a87-53f09671c900\") " Nov 28 13:40:36 crc kubenswrapper[4857]: I1128 13:40:36.176635 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cz49d\" (UniqueName: \"kubernetes.io/projected/574e8323-bfa6-4c1d-9a87-53f09671c900-kube-api-access-cz49d\") pod \"574e8323-bfa6-4c1d-9a87-53f09671c900\" (UID: \"574e8323-bfa6-4c1d-9a87-53f09671c900\") " Nov 28 13:40:36 crc kubenswrapper[4857]: I1128 13:40:36.185516 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/574e8323-bfa6-4c1d-9a87-53f09671c900-kube-api-access-cz49d" (OuterVolumeSpecName: "kube-api-access-cz49d") pod "574e8323-bfa6-4c1d-9a87-53f09671c900" (UID: "574e8323-bfa6-4c1d-9a87-53f09671c900"). InnerVolumeSpecName "kube-api-access-cz49d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:40:36 crc kubenswrapper[4857]: I1128 13:40:36.186093 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/574e8323-bfa6-4c1d-9a87-53f09671c900-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "574e8323-bfa6-4c1d-9a87-53f09671c900" (UID: "574e8323-bfa6-4c1d-9a87-53f09671c900"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:40:36 crc kubenswrapper[4857]: I1128 13:40:36.231983 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/574e8323-bfa6-4c1d-9a87-53f09671c900-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "574e8323-bfa6-4c1d-9a87-53f09671c900" (UID: "574e8323-bfa6-4c1d-9a87-53f09671c900"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:40:36 crc kubenswrapper[4857]: I1128 13:40:36.234972 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/574e8323-bfa6-4c1d-9a87-53f09671c900-config-data" (OuterVolumeSpecName: "config-data") pod "574e8323-bfa6-4c1d-9a87-53f09671c900" (UID: "574e8323-bfa6-4c1d-9a87-53f09671c900"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:40:36 crc kubenswrapper[4857]: I1128 13:40:36.278725 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cz49d\" (UniqueName: \"kubernetes.io/projected/574e8323-bfa6-4c1d-9a87-53f09671c900-kube-api-access-cz49d\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:36 crc kubenswrapper[4857]: I1128 13:40:36.278773 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/574e8323-bfa6-4c1d-9a87-53f09671c900-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:36 crc kubenswrapper[4857]: I1128 13:40:36.278788 4857 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/574e8323-bfa6-4c1d-9a87-53f09671c900-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:36 crc kubenswrapper[4857]: I1128 13:40:36.278796 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/574e8323-bfa6-4c1d-9a87-53f09671c900-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.027161 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-dwjz2" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.027163 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-dwjz2" event={"ID":"574e8323-bfa6-4c1d-9a87-53f09671c900","Type":"ContainerDied","Data":"b0af93f55527451ef3d1dda53632cf34351b46d9c9f5cb9421a1b101899f911b"} Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.027209 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b0af93f55527451ef3d1dda53632cf34351b46d9c9f5cb9421a1b101899f911b" Nov 28 13:40:37 crc kubenswrapper[4857]: E1128 13:40:37.030279 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-sp8xb" podUID="779e7a51-657e-47ae-a068-3cd339cd9bb1" Nov 28 13:40:37 crc kubenswrapper[4857]: E1128 13:40:37.239728 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 28 13:40:37 crc kubenswrapper[4857]: E1128 13:40:37.239893 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-h72dj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-g9jf5_openstack(d7eee1cb-c5d6-45e4-a007-0d29935cd83a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:40:37 crc kubenswrapper[4857]: E1128 13:40:37.241707 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-g9jf5" podUID="d7eee1cb-c5d6-45e4-a007-0d29935cd83a" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.468064 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-xt2qc"] Nov 28 13:40:37 crc kubenswrapper[4857]: E1128 13:40:37.468426 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="574e8323-bfa6-4c1d-9a87-53f09671c900" containerName="glance-db-sync" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.468445 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="574e8323-bfa6-4c1d-9a87-53f09671c900" containerName="glance-db-sync" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.468598 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="574e8323-bfa6-4c1d-9a87-53f09671c900" containerName="glance-db-sync" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 
13:40:37.471436 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.495366 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-xt2qc"] Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.499039 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.511787 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ddmwk\" (UniqueName: \"kubernetes.io/projected/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-kube-api-access-ddmwk\") pod \"dnsmasq-dns-56df8fb6b7-xt2qc\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.511865 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-xt2qc\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.511892 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-xt2qc\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.511910 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-config\") pod \"dnsmasq-dns-56df8fb6b7-xt2qc\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.511961 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-xt2qc\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.512009 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-xt2qc\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.619409 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-ovsdbserver-nb\") pod \"c61bf456-25ac-453e-87cf-e0694d637c22\" (UID: \"c61bf456-25ac-453e-87cf-e0694d637c22\") " Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.619660 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-ovsdbserver-sb\") pod 
\"c61bf456-25ac-453e-87cf-e0694d637c22\" (UID: \"c61bf456-25ac-453e-87cf-e0694d637c22\") " Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.619723 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zdgkb\" (UniqueName: \"kubernetes.io/projected/c61bf456-25ac-453e-87cf-e0694d637c22-kube-api-access-zdgkb\") pod \"c61bf456-25ac-453e-87cf-e0694d637c22\" (UID: \"c61bf456-25ac-453e-87cf-e0694d637c22\") " Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.619810 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-dns-svc\") pod \"c61bf456-25ac-453e-87cf-e0694d637c22\" (UID: \"c61bf456-25ac-453e-87cf-e0694d637c22\") " Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.619826 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-config\") pod \"c61bf456-25ac-453e-87cf-e0694d637c22\" (UID: \"c61bf456-25ac-453e-87cf-e0694d637c22\") " Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.620038 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-xt2qc\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.620069 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ddmwk\" (UniqueName: \"kubernetes.io/projected/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-kube-api-access-ddmwk\") pod \"dnsmasq-dns-56df8fb6b7-xt2qc\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.620123 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-xt2qc\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.620148 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-xt2qc\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.620167 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-config\") pod \"dnsmasq-dns-56df8fb6b7-xt2qc\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.620216 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-xt2qc\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.621059 4857 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-xt2qc\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.621551 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-xt2qc\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.622307 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-config\") pod \"dnsmasq-dns-56df8fb6b7-xt2qc\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.626369 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-xt2qc\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.626611 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-xt2qc\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.659841 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c61bf456-25ac-453e-87cf-e0694d637c22-kube-api-access-zdgkb" (OuterVolumeSpecName: "kube-api-access-zdgkb") pod "c61bf456-25ac-453e-87cf-e0694d637c22" (UID: "c61bf456-25ac-453e-87cf-e0694d637c22"). InnerVolumeSpecName "kube-api-access-zdgkb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.673510 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ddmwk\" (UniqueName: \"kubernetes.io/projected/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-kube-api-access-ddmwk\") pod \"dnsmasq-dns-56df8fb6b7-xt2qc\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.723068 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zdgkb\" (UniqueName: \"kubernetes.io/projected/c61bf456-25ac-453e-87cf-e0694d637c22-kube-api-access-zdgkb\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.726275 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-g2shl"] Nov 28 13:40:37 crc kubenswrapper[4857]: I1128 13:40:37.858514 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.032523 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-config" (OuterVolumeSpecName: "config") pod "c61bf456-25ac-453e-87cf-e0694d637c22" (UID: "c61bf456-25ac-453e-87cf-e0694d637c22"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.045160 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk" event={"ID":"c61bf456-25ac-453e-87cf-e0694d637c22","Type":"ContainerDied","Data":"b51068fb79628ae3750196307117b007388c92165f008e001ab06ac088c5dec7"} Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.045231 4857 scope.go:117] "RemoveContainer" containerID="a52115381f0e1bb2ee8bc7978d1269438baa8c3a8f4f36bf04bebd70ee0ffa32" Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.045384 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk" Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.069038 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-g2shl" event={"ID":"96846d9c-1949-4655-be98-006b4e5dd154","Type":"ContainerStarted","Data":"d37f4d2803f4687208580e072995a86cacf58b914680ef2eeab4a14b8c609ba7"} Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.074053 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-qrf4k" event={"ID":"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d","Type":"ContainerStarted","Data":"91dcce04820a9b0657a39d68a3db2fea0d2ae4c92ed1c937c1ef2c2fce48486a"} Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.091382 4857 scope.go:117] "RemoveContainer" containerID="1432dae7d3eda2dff4167d74c6617d9c5bf396db3afbecfec74f584a276a961e" Nov 28 13:40:38 crc kubenswrapper[4857]: E1128 13:40:38.091825 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-g9jf5" podUID="d7eee1cb-c5d6-45e4-a007-0d29935cd83a" Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.098998 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c61bf456-25ac-453e-87cf-e0694d637c22" (UID: "c61bf456-25ac-453e-87cf-e0694d637c22"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.104855 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-qrf4k" podStartSLOduration=2.613892933 podStartE2EDuration="24.104840149s" podCreationTimestamp="2025-11-28 13:40:14 +0000 UTC" firstStartedPulling="2025-11-28 13:40:15.714275986 +0000 UTC m=+1307.741651153" lastFinishedPulling="2025-11-28 13:40:37.205223202 +0000 UTC m=+1329.232598369" observedRunningTime="2025-11-28 13:40:38.09863192 +0000 UTC m=+1330.126007087" watchObservedRunningTime="2025-11-28 13:40:38.104840149 +0000 UTC m=+1330.132215316" Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.129708 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.129734 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.148063 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c61bf456-25ac-453e-87cf-e0694d637c22" (UID: "c61bf456-25ac-453e-87cf-e0694d637c22"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.158610 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c61bf456-25ac-453e-87cf-e0694d637c22" (UID: "c61bf456-25ac-453e-87cf-e0694d637c22"). InnerVolumeSpecName "ovsdbserver-nb". 
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.231235 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.231655 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c61bf456-25ac-453e-87cf-e0694d637c22-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.364011 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-xt2qc"]
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.384601 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-ngqsk"]
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.392493 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-ngqsk"]
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.469699 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 13:40:38 crc kubenswrapper[4857]: E1128 13:40:38.470165 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c61bf456-25ac-453e-87cf-e0694d637c22" containerName="init"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.470190 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c61bf456-25ac-453e-87cf-e0694d637c22" containerName="init"
Nov 28 13:40:38 crc kubenswrapper[4857]: E1128 13:40:38.470204 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c61bf456-25ac-453e-87cf-e0694d637c22" containerName="dnsmasq-dns"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.470211 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c61bf456-25ac-453e-87cf-e0694d637c22" containerName="dnsmasq-dns"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.472004 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="c61bf456-25ac-453e-87cf-e0694d637c22" containerName="dnsmasq-dns"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.473521 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.475465 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.476605 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.476824 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-krs5d"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.486943 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.592267 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.594323 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.597328 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.622461 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.639368 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a62040a-8596-401c-a157-ac001544fae8-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.639450 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a62040a-8596-401c-a157-ac001544fae8-logs\") pod \"glance-default-external-api-0\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.639493 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.639508 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a62040a-8596-401c-a157-ac001544fae8-config-data\") pod \"glance-default-external-api-0\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.639679 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3a62040a-8596-401c-a157-ac001544fae8-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.639742 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a62040a-8596-401c-a157-ac001544fae8-scripts\") pod \"glance-default-external-api-0\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.639801 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8tp4l\" (UniqueName: \"kubernetes.io/projected/3a62040a-8596-401c-a157-ac001544fae8-kube-api-access-8tp4l\") pod \"glance-default-external-api-0\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.740970 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c7c84546-0f82-49f5-b373-58eb6ead7939-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.741329 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7c84546-0f82-49f5-b373-58eb6ead7939-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.741364 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3a62040a-8596-401c-a157-ac001544fae8-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.741399 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.741451 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvv9s\" (UniqueName: \"kubernetes.io/projected/c7c84546-0f82-49f5-b373-58eb6ead7939-kube-api-access-mvv9s\") pod \"glance-default-internal-api-0\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.741496 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a62040a-8596-401c-a157-ac001544fae8-scripts\") pod \"glance-default-external-api-0\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.741528 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8tp4l\" (UniqueName: \"kubernetes.io/projected/3a62040a-8596-401c-a157-ac001544fae8-kube-api-access-8tp4l\") pod \"glance-default-external-api-0\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.741577 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7c84546-0f82-49f5-b373-58eb6ead7939-logs\") pod \"glance-default-internal-api-0\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.741603 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a62040a-8596-401c-a157-ac001544fae8-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.741632 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a62040a-8596-401c-a157-ac001544fae8-logs\") pod \"glance-default-external-api-0\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.741657 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.741676 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a62040a-8596-401c-a157-ac001544fae8-config-data\") pod \"glance-default-external-api-0\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.741696 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7c84546-0f82-49f5-b373-58eb6ead7939-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.741772 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7c84546-0f82-49f5-b373-58eb6ead7939-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.742007 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3a62040a-8596-401c-a157-ac001544fae8-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.742352 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a62040a-8596-401c-a157-ac001544fae8-logs\") pod \"glance-default-external-api-0\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.742406 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-external-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.748600 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a62040a-8596-401c-a157-ac001544fae8-config-data\") pod \"glance-default-external-api-0\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.752605 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a62040a-8596-401c-a157-ac001544fae8-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.761878 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a62040a-8596-401c-a157-ac001544fae8-scripts\") pod \"glance-default-external-api-0\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.764123 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8tp4l\" (UniqueName: \"kubernetes.io/projected/3a62040a-8596-401c-a157-ac001544fae8-kube-api-access-8tp4l\") pod \"glance-default-external-api-0\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.779699 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.842801 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7c84546-0f82-49f5-b373-58eb6ead7939-logs\") pod \"glance-default-internal-api-0\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.843073 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7c84546-0f82-49f5-b373-58eb6ead7939-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.843223 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7c84546-0f82-49f5-b373-58eb6ead7939-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.843359 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7c84546-0f82-49f5-b373-58eb6ead7939-logs\") pod \"glance-default-internal-api-0\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.843375 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c7c84546-0f82-49f5-b373-58eb6ead7939-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.843513 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7c84546-0f82-49f5-b373-58eb6ead7939-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.843590 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.843631 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvv9s\" (UniqueName: \"kubernetes.io/projected/c7c84546-0f82-49f5-b373-58eb6ead7939-kube-api-access-mvv9s\") pod \"glance-default-internal-api-0\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.843828 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.844450 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c7c84546-0f82-49f5-b373-58eb6ead7939-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.847622 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7c84546-0f82-49f5-b373-58eb6ead7939-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.847933 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7c84546-0f82-49f5-b373-58eb6ead7939-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.849127 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7c84546-0f82-49f5-b373-58eb6ead7939-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.863413 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvv9s\" (UniqueName: \"kubernetes.io/projected/c7c84546-0f82-49f5-b373-58eb6ead7939-kube-api-access-mvv9s\") pod \"glance-default-internal-api-0\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.866870 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
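The glance volume entries above walk through the volume manager's phases in order: VerifyControllerAttachedVolume, then MountVolume.MountDevice once per device for the local PVs (here /mnt/openstack/pv12 and /mnt/openstack/pv01), then MountVolume.SetUp for every volume of the pod. A hypothetical log-scraping helper for pulling the phase, state, and volume name out of these lines (the regex is an assumption matched to the escaped quoting seen in this log, not kubelet code):

package main

import (
	"fmt"
	"regexp"
)

// Matches messages such as:
//   "operationExecutor.MountVolume started for volume \"config-data\" ..."
//   "MountVolume.SetUp succeeded for volume \"config-data\" ..."
var phaseRe = regexp.MustCompile(`(MountVolume\.SetUp|MountVolume\.MountDevice|VerifyControllerAttachedVolume|MountVolume) (started|succeeded) for volume \\"([^\\]+)\\"`)

func main() {
	line := `I1128 13:40:38.849127 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: ...)"`
	if m := phaseRe.FindStringSubmatch(line); m != nil {
		fmt.Printf("phase=%s state=%s volume=%s\n", m[1], m[2], m[3])
	}
}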
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.870475 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:38 crc kubenswrapper[4857]: I1128 13:40:38.911240 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:39 crc kubenswrapper[4857]: I1128 13:40:39.106711 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dd1c47cb-0f7e-42f1-824b-a6cef692c751","Type":"ContainerStarted","Data":"d64f59fc0d63d8fdddaf62677a40dab269073c3700fca581921792c2837fe785"}
Nov 28 13:40:39 crc kubenswrapper[4857]: I1128 13:40:39.124517 4857 generic.go:334] "Generic (PLEG): container finished" podID="2b7a5ae9-9951-4c40-9f14-18db9ef9084a" containerID="7a5f00c3945d3734a4fd6a2de8380d88c0a0f781f2c72e162e35e213c6d0b74c" exitCode=0
Nov 28 13:40:39 crc kubenswrapper[4857]: I1128 13:40:39.124847 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" event={"ID":"2b7a5ae9-9951-4c40-9f14-18db9ef9084a","Type":"ContainerDied","Data":"7a5f00c3945d3734a4fd6a2de8380d88c0a0f781f2c72e162e35e213c6d0b74c"}
Nov 28 13:40:39 crc kubenswrapper[4857]: I1128 13:40:39.124872 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" event={"ID":"2b7a5ae9-9951-4c40-9f14-18db9ef9084a","Type":"ContainerStarted","Data":"d80f9a2a2b9793b20b87e03187921bb5b261d021f5fd58e75c087700dd4e8d1c"}
Nov 28 13:40:39 crc kubenswrapper[4857]: I1128 13:40:39.133513 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-g2shl" event={"ID":"96846d9c-1949-4655-be98-006b4e5dd154","Type":"ContainerStarted","Data":"a9f3b9fd804e2424e73c9dfbddaef4f71d9e87da6184a7e141669067fc738bc2"}
Nov 28 13:40:39 crc kubenswrapper[4857]: I1128 13:40:39.186860 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-g2shl" podStartSLOduration=16.186845804 podStartE2EDuration="16.186845804s" podCreationTimestamp="2025-11-28 13:40:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:40:39.181657254 +0000 UTC m=+1331.209032421" watchObservedRunningTime="2025-11-28 13:40:39.186845804 +0000 UTC m=+1331.214220971"
Nov 28 13:40:39 crc kubenswrapper[4857]: W1128 13:40:39.512274 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3a62040a_8596_401c_a157_ac001544fae8.slice/crio-5735ebc6c8fda9fdbcf2d4a7de3c74d3bbb293d548ed3d3f34faa1cd61cf353d WatchSource:0}: Error finding container 5735ebc6c8fda9fdbcf2d4a7de3c74d3bbb293d548ed3d3f34faa1cd61cf353d: Status 404 returned error can't find the container with id 5735ebc6c8fda9fdbcf2d4a7de3c74d3bbb293d548ed3d3f34faa1cd61cf353d
Nov 28 13:40:39 crc kubenswrapper[4857]: I1128 13:40:39.523361 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 13:40:39 crc kubenswrapper[4857]: I1128 13:40:39.685988 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 13:40:39 crc kubenswrapper[4857]: W1128 13:40:39.702631 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc7c84546_0f82_49f5_b373_58eb6ead7939.slice/crio-682032d79cb6e650298964663ecf39afb33b6b993fa9381c637569c77a0df319 WatchSource:0}: Error finding container 682032d79cb6e650298964663ecf39afb33b6b993fa9381c637569c77a0df319: Status 404 returned error can't find the container with id 682032d79cb6e650298964663ecf39afb33b6b993fa9381c637569c77a0df319
Nov 28 13:40:40 crc kubenswrapper[4857]: I1128 13:40:40.155081 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3a62040a-8596-401c-a157-ac001544fae8","Type":"ContainerStarted","Data":"5735ebc6c8fda9fdbcf2d4a7de3c74d3bbb293d548ed3d3f34faa1cd61cf353d"}
Nov 28 13:40:40 crc kubenswrapper[4857]: I1128 13:40:40.175467 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" event={"ID":"2b7a5ae9-9951-4c40-9f14-18db9ef9084a","Type":"ContainerStarted","Data":"59947e48b393a942777a6a3175b36ea339cd93ce33c07162c6e9cd5791ad24cb"}
Nov 28 13:40:40 crc kubenswrapper[4857]: I1128 13:40:40.175557 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc"
Nov 28 13:40:40 crc kubenswrapper[4857]: I1128 13:40:40.177797 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c7c84546-0f82-49f5-b373-58eb6ead7939","Type":"ContainerStarted","Data":"682032d79cb6e650298964663ecf39afb33b6b993fa9381c637569c77a0df319"}
Nov 28 13:40:40 crc kubenswrapper[4857]: I1128 13:40:40.232180 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 13:40:40 crc kubenswrapper[4857]: I1128 13:40:40.240302 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" podStartSLOduration=3.240280013 podStartE2EDuration="3.240280013s" podCreationTimestamp="2025-11-28 13:40:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:40:40.227827102 +0000 UTC m=+1332.255202279" watchObservedRunningTime="2025-11-28 13:40:40.240280013 +0000 UTC m=+1332.267655180"
Nov 28 13:40:40 crc kubenswrapper[4857]: I1128 13:40:40.344104 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c61bf456-25ac-453e-87cf-e0694d637c22" path="/var/lib/kubelet/pods/c61bf456-25ac-453e-87cf-e0694d637c22/volumes"
Nov 28 13:40:40 crc kubenswrapper[4857]: I1128 13:40:40.370398 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.191157 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c7c84546-0f82-49f5-b373-58eb6ead7939","Type":"ContainerStarted","Data":"7a5195067a244a982b0bd02b88a4005109465de1b7807e494f20b75b989d8ef5"}
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.195930 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dd1c47cb-0f7e-42f1-824b-a6cef692c751","Type":"ContainerStarted","Data":"67d7fc4f5095e379062283d9a025c1c36226cc34ff139a5a7da965bb1e6c4b4a"}
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.198733 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3a62040a-8596-401c-a157-ac001544fae8","Type":"ContainerStarted","Data":"02f65b346788a578a955e9ad5bb8517de177434756ea090f43bcf5b003b2369a"}
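Each "SyncLoop (PLEG): event for pod" entry above carries an event={...} payload that is plain JSON: ID is the pod UID, Type the lifecycle transition, and Data the container (or sandbox) ID. A small sketch decoding one payload copied from the ceilometer-0 entry (the struct is shaped after the log text, not taken from kubelet source):

package main

import (
	"encoding/json"
	"fmt"
)

// plegEvent mirrors the keys visible in the log payload.
type plegEvent struct {
	ID   string
	Type string
	Data string
}

func main() {
	raw := `{"ID":"dd1c47cb-0f7e-42f1-824b-a6cef692c751","Type":"ContainerStarted","Data":"d64f59fc0d63d8fdddaf62677a40dab269073c3700fca581921792c2837fe785"}`
	var ev plegEvent
	if err := json.Unmarshal([]byte(raw), &ev); err != nil {
		panic(err)
	}
	fmt.Printf("pod %s: %s %s\n", ev.ID, ev.Type, ev.Data[:12])
}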
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.198792 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3a62040a-8596-401c-a157-ac001544fae8","Type":"ContainerStarted","Data":"33138fdd1082f2e2849a24dd50232873cb72c296b0636533a36710aa7534805c"}
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.198931 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="3a62040a-8596-401c-a157-ac001544fae8" containerName="glance-log" containerID="cri-o://33138fdd1082f2e2849a24dd50232873cb72c296b0636533a36710aa7534805c" gracePeriod=30
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.199026 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="3a62040a-8596-401c-a157-ac001544fae8" containerName="glance-httpd" containerID="cri-o://02f65b346788a578a955e9ad5bb8517de177434756ea090f43bcf5b003b2369a" gracePeriod=30
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.202215 4857 generic.go:334] "Generic (PLEG): container finished" podID="e603bbb8-24d6-43aa-bd0e-0039d8abc8e2" containerID="0878bd25d4cba44dddd8101e2ea744174f24a930321fa1c902f705f1860a22f1" exitCode=0
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.202280 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-46vxl" event={"ID":"e603bbb8-24d6-43aa-bd0e-0039d8abc8e2","Type":"ContainerDied","Data":"0878bd25d4cba44dddd8101e2ea744174f24a930321fa1c902f705f1860a22f1"}
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.208024 4857 generic.go:334] "Generic (PLEG): container finished" podID="a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d" containerID="91dcce04820a9b0657a39d68a3db2fea0d2ae4c92ed1c937c1ef2c2fce48486a" exitCode=0
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.208405 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-qrf4k" event={"ID":"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d","Type":"ContainerDied","Data":"91dcce04820a9b0657a39d68a3db2fea0d2ae4c92ed1c937c1ef2c2fce48486a"}
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.228489 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.228467503 podStartE2EDuration="4.228467503s" podCreationTimestamp="2025-11-28 13:40:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:40:41.218744472 +0000 UTC m=+1333.246119639" watchObservedRunningTime="2025-11-28 13:40:41.228467503 +0000 UTC m=+1333.255842660"
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.771930 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
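The two "Killing container with a grace period" entries above give each glance container 30 seconds to exit after the termination signal; the matching "container finished" entries further down report exitCode=143, the conventional 128+signal encoding for a SIGTERM death. A one-liner confirming the decoding (generic POSIX convention, not kubelet logic):

package main

import (
	"fmt"
	"syscall"
)

func main() {
	exitCode := 143
	sig := syscall.Signal(exitCode - 128) // 143 - 128 = 15 = SIGTERM
	fmt.Printf("exitCode %d => killed by signal %d (%s)\n", exitCode, int(sig), sig)
}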
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.905949 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a62040a-8596-401c-a157-ac001544fae8-combined-ca-bundle\") pod \"3a62040a-8596-401c-a157-ac001544fae8\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") "
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.906009 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a62040a-8596-401c-a157-ac001544fae8-logs\") pod \"3a62040a-8596-401c-a157-ac001544fae8\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") "
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.906074 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a62040a-8596-401c-a157-ac001544fae8-config-data\") pod \"3a62040a-8596-401c-a157-ac001544fae8\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") "
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.906117 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tp4l\" (UniqueName: \"kubernetes.io/projected/3a62040a-8596-401c-a157-ac001544fae8-kube-api-access-8tp4l\") pod \"3a62040a-8596-401c-a157-ac001544fae8\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") "
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.906173 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3a62040a-8596-401c-a157-ac001544fae8-httpd-run\") pod \"3a62040a-8596-401c-a157-ac001544fae8\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") "
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.906218 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"3a62040a-8596-401c-a157-ac001544fae8\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") "
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.906242 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a62040a-8596-401c-a157-ac001544fae8-scripts\") pod \"3a62040a-8596-401c-a157-ac001544fae8\" (UID: \"3a62040a-8596-401c-a157-ac001544fae8\") "
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.906528 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a62040a-8596-401c-a157-ac001544fae8-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "3a62040a-8596-401c-a157-ac001544fae8" (UID: "3a62040a-8596-401c-a157-ac001544fae8"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.906816 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a62040a-8596-401c-a157-ac001544fae8-logs" (OuterVolumeSpecName: "logs") pod "3a62040a-8596-401c-a157-ac001544fae8" (UID: "3a62040a-8596-401c-a157-ac001544fae8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.911712 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a62040a-8596-401c-a157-ac001544fae8-scripts" (OuterVolumeSpecName: "scripts") pod "3a62040a-8596-401c-a157-ac001544fae8" (UID: "3a62040a-8596-401c-a157-ac001544fae8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.914114 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a62040a-8596-401c-a157-ac001544fae8-kube-api-access-8tp4l" (OuterVolumeSpecName: "kube-api-access-8tp4l") pod "3a62040a-8596-401c-a157-ac001544fae8" (UID: "3a62040a-8596-401c-a157-ac001544fae8"). InnerVolumeSpecName "kube-api-access-8tp4l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.917124 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "3a62040a-8596-401c-a157-ac001544fae8" (UID: "3a62040a-8596-401c-a157-ac001544fae8"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.940844 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a62040a-8596-401c-a157-ac001544fae8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3a62040a-8596-401c-a157-ac001544fae8" (UID: "3a62040a-8596-401c-a157-ac001544fae8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:40:41 crc kubenswrapper[4857]: I1128 13:40:41.954715 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a62040a-8596-401c-a157-ac001544fae8-config-data" (OuterVolumeSpecName: "config-data") pod "3a62040a-8596-401c-a157-ac001544fae8" (UID: "3a62040a-8596-401c-a157-ac001544fae8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.009848 4857 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3a62040a-8596-401c-a157-ac001544fae8-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.009894 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" "
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.009904 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a62040a-8596-401c-a157-ac001544fae8-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.009915 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a62040a-8596-401c-a157-ac001544fae8-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.009925 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a62040a-8596-401c-a157-ac001544fae8-logs\") on node \"crc\" DevicePath \"\""
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.009933 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a62040a-8596-401c-a157-ac001544fae8-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.009941 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tp4l\" (UniqueName: \"kubernetes.io/projected/3a62040a-8596-401c-a157-ac001544fae8-kube-api-access-8tp4l\") on node \"crc\" DevicePath \"\""
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.036799 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.111238 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\""
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.179256 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-ngqsk" podUID="c61bf456-25ac-453e-87cf-e0694d637c22" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.117:5353: i/o timeout"
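The "Probe failed" entry above is a readiness check against the old dnsmasq pod's address that timed out because that pod was already being torn down. A minimal sketch of an equivalent TCP check, using the address and port from the log (purely illustrative, not the kubelet's prober):

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Dial the pod IP and port seen in the probe output above.
	conn, err := net.DialTimeout("tcp", "10.217.0.117:5353", time.Second)
	if err != nil {
		fmt.Println("probe failed:", err) // e.g. "dial tcp 10.217.0.117:5353: i/o timeout"
		return
	}
	conn.Close()
	fmt.Println("probe succeeded")
}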
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.225587 4857 generic.go:334] "Generic (PLEG): container finished" podID="3a62040a-8596-401c-a157-ac001544fae8" containerID="02f65b346788a578a955e9ad5bb8517de177434756ea090f43bcf5b003b2369a" exitCode=143
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.225669 4857 generic.go:334] "Generic (PLEG): container finished" podID="3a62040a-8596-401c-a157-ac001544fae8" containerID="33138fdd1082f2e2849a24dd50232873cb72c296b0636533a36710aa7534805c" exitCode=143
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.225718 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3a62040a-8596-401c-a157-ac001544fae8","Type":"ContainerDied","Data":"02f65b346788a578a955e9ad5bb8517de177434756ea090f43bcf5b003b2369a"}
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.225850 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3a62040a-8596-401c-a157-ac001544fae8","Type":"ContainerDied","Data":"33138fdd1082f2e2849a24dd50232873cb72c296b0636533a36710aa7534805c"}
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.225862 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3a62040a-8596-401c-a157-ac001544fae8","Type":"ContainerDied","Data":"5735ebc6c8fda9fdbcf2d4a7de3c74d3bbb293d548ed3d3f34faa1cd61cf353d"}
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.225884 4857 scope.go:117] "RemoveContainer" containerID="02f65b346788a578a955e9ad5bb8517de177434756ea090f43bcf5b003b2369a"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.226064 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.237135 4857 generic.go:334] "Generic (PLEG): container finished" podID="96846d9c-1949-4655-be98-006b4e5dd154" containerID="a9f3b9fd804e2424e73c9dfbddaef4f71d9e87da6184a7e141669067fc738bc2" exitCode=0
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.237237 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-g2shl" event={"ID":"96846d9c-1949-4655-be98-006b4e5dd154","Type":"ContainerDied","Data":"a9f3b9fd804e2424e73c9dfbddaef4f71d9e87da6184a7e141669067fc738bc2"}
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.248043 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="c7c84546-0f82-49f5-b373-58eb6ead7939" containerName="glance-log" containerID="cri-o://7a5195067a244a982b0bd02b88a4005109465de1b7807e494f20b75b989d8ef5" gracePeriod=30
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.248643 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c7c84546-0f82-49f5-b373-58eb6ead7939","Type":"ContainerStarted","Data":"4d84cdd67c7b9f9ffce893f5cc215dc8a62a5a3769a4deb481b7b9da49bd8328"}
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.248867 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="c7c84546-0f82-49f5-b373-58eb6ead7939" containerName="glance-httpd" containerID="cri-o://4d84cdd67c7b9f9ffce893f5cc215dc8a62a5a3769a4deb481b7b9da49bd8328" gracePeriod=30
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.313578 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.313553448 podStartE2EDuration="5.313553448s" podCreationTimestamp="2025-11-28 13:40:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:40:42.297138413 +0000 UTC m=+1334.324513610" watchObservedRunningTime="2025-11-28 13:40:42.313553448 +0000 UTC m=+1334.340928625"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.316828 4857 scope.go:117] "RemoveContainer" containerID="33138fdd1082f2e2849a24dd50232873cb72c296b0636533a36710aa7534805c"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.371595 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.391383 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.395941 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 13:40:42 crc kubenswrapper[4857]: E1128 13:40:42.396441 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a62040a-8596-401c-a157-ac001544fae8" containerName="glance-httpd"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.396460 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a62040a-8596-401c-a157-ac001544fae8" containerName="glance-httpd"
Nov 28 13:40:42 crc kubenswrapper[4857]: E1128 13:40:42.396477 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a62040a-8596-401c-a157-ac001544fae8" containerName="glance-log"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.396483 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a62040a-8596-401c-a157-ac001544fae8" containerName="glance-log"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.396679 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a62040a-8596-401c-a157-ac001544fae8" containerName="glance-log"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.396697 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a62040a-8596-401c-a157-ac001544fae8" containerName="glance-httpd"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.397849 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.403106 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.405714 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.405929 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.443880 4857 scope.go:117] "RemoveContainer" containerID="02f65b346788a578a955e9ad5bb8517de177434756ea090f43bcf5b003b2369a"
Nov 28 13:40:42 crc kubenswrapper[4857]: E1128 13:40:42.445456 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02f65b346788a578a955e9ad5bb8517de177434756ea090f43bcf5b003b2369a\": container with ID starting with 02f65b346788a578a955e9ad5bb8517de177434756ea090f43bcf5b003b2369a not found: ID does not exist" containerID="02f65b346788a578a955e9ad5bb8517de177434756ea090f43bcf5b003b2369a"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.445622 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02f65b346788a578a955e9ad5bb8517de177434756ea090f43bcf5b003b2369a"} err="failed to get container status \"02f65b346788a578a955e9ad5bb8517de177434756ea090f43bcf5b003b2369a\": rpc error: code = NotFound desc = could not find container \"02f65b346788a578a955e9ad5bb8517de177434756ea090f43bcf5b003b2369a\": container with ID starting with 02f65b346788a578a955e9ad5bb8517de177434756ea090f43bcf5b003b2369a not found: ID does not exist"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.445792 4857 scope.go:117] "RemoveContainer" containerID="33138fdd1082f2e2849a24dd50232873cb72c296b0636533a36710aa7534805c"
Nov 28 13:40:42 crc kubenswrapper[4857]: E1128 13:40:42.446452 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"33138fdd1082f2e2849a24dd50232873cb72c296b0636533a36710aa7534805c\": container with ID starting with 33138fdd1082f2e2849a24dd50232873cb72c296b0636533a36710aa7534805c not found: ID does not exist" containerID="33138fdd1082f2e2849a24dd50232873cb72c296b0636533a36710aa7534805c"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.446491 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"33138fdd1082f2e2849a24dd50232873cb72c296b0636533a36710aa7534805c"} err="failed to get container status \"33138fdd1082f2e2849a24dd50232873cb72c296b0636533a36710aa7534805c\": rpc error: code = NotFound desc = could not find container \"33138fdd1082f2e2849a24dd50232873cb72c296b0636533a36710aa7534805c\": container with ID starting with 33138fdd1082f2e2849a24dd50232873cb72c296b0636533a36710aa7534805c not found: ID does not exist"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.446516 4857 scope.go:117] "RemoveContainer" containerID="02f65b346788a578a955e9ad5bb8517de177434756ea090f43bcf5b003b2369a"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.446929 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02f65b346788a578a955e9ad5bb8517de177434756ea090f43bcf5b003b2369a"} err="failed to get container status \"02f65b346788a578a955e9ad5bb8517de177434756ea090f43bcf5b003b2369a\": rpc error: code = NotFound desc = could not find container \"02f65b346788a578a955e9ad5bb8517de177434756ea090f43bcf5b003b2369a\": container with ID starting with 02f65b346788a578a955e9ad5bb8517de177434756ea090f43bcf5b003b2369a not found: ID does not exist"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.446947 4857 scope.go:117] "RemoveContainer" containerID="33138fdd1082f2e2849a24dd50232873cb72c296b0636533a36710aa7534805c"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.447515 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"33138fdd1082f2e2849a24dd50232873cb72c296b0636533a36710aa7534805c"} err="failed to get container status \"33138fdd1082f2e2849a24dd50232873cb72c296b0636533a36710aa7534805c\": rpc error: code = NotFound desc = could not find container \"33138fdd1082f2e2849a24dd50232873cb72c296b0636533a36710aa7534805c\": container with ID starting with 33138fdd1082f2e2849a24dd50232873cb72c296b0636533a36710aa7534805c not found: ID does not exist"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.550112 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-scripts\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.550450 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
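The RemoveContainer, "ContainerStatus from runtime service failed", and "DeleteContainer returned error" sequence above is the usual idempotent-cleanup pattern: the container is already gone, the runtime answers NotFound, and the error is logged and dropped because the desired end state (container removed) already holds. A hypothetical helper showing the shape of that handling (not kubelet code):

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("NotFound: ID does not exist")

// removeContainer stands in for a runtime RemoveContainer call whose target
// was already deleted by an earlier attempt.
func removeContainer(id string) error { return errNotFound }

func cleanup(id string) {
	if err := removeContainer(id); errors.Is(err, errNotFound) {
		// Treat NotFound as success: deletion is idempotent.
		fmt.Printf("container %s already gone: %v\n", id[:12], err)
	} else if err != nil {
		fmt.Println("retry later:", err)
	}
}

func main() {
	cleanup("02f65b346788a578a955e9ad5bb8517de177434756ea090f43bcf5b003b2369a")
}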
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.550723 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.550781 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-config-data\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.550836 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.550862 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.550887 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzptm\" (UniqueName: \"kubernetes.io/projected/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-kube-api-access-fzptm\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.550935 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-logs\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.653271 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-scripts\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.653384 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.653451 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.653473 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-config-data\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.653501 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.653529 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.653551 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzptm\" (UniqueName: \"kubernetes.io/projected/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-kube-api-access-fzptm\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.653599 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-logs\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.654410 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-logs\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.654620 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.654739 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.666623 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.667068 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-config-data\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.667275 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.668890 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-scripts\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.674230 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzptm\" (UniqueName: \"kubernetes.io/projected/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-kube-api-access-fzptm\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.698014 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.802367 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.812989 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-46vxl"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.823416 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-qrf4k"
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.963277 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lvrkd\" (UniqueName: \"kubernetes.io/projected/e603bbb8-24d6-43aa-bd0e-0039d8abc8e2-kube-api-access-lvrkd\") pod \"e603bbb8-24d6-43aa-bd0e-0039d8abc8e2\" (UID: \"e603bbb8-24d6-43aa-bd0e-0039d8abc8e2\") "
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.963526 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e603bbb8-24d6-43aa-bd0e-0039d8abc8e2-combined-ca-bundle\") pod \"e603bbb8-24d6-43aa-bd0e-0039d8abc8e2\" (UID: \"e603bbb8-24d6-43aa-bd0e-0039d8abc8e2\") "
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.963581 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-scripts\") pod \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\" (UID: \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\") "
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.963645 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e603bbb8-24d6-43aa-bd0e-0039d8abc8e2-config\") pod \"e603bbb8-24d6-43aa-bd0e-0039d8abc8e2\" (UID: \"e603bbb8-24d6-43aa-bd0e-0039d8abc8e2\") "
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.964202 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xhb67\" (UniqueName: \"kubernetes.io/projected/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-kube-api-access-xhb67\") pod \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\" (UID: \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\") "
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.964548 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-combined-ca-bundle\") pod \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\" (UID: \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\") "
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.964684 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-config-data\") pod \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\" (UID: \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\") "
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.964710 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-logs\") pod \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\" (UID: \"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d\") "
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.965567 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-logs" (OuterVolumeSpecName: "logs") pod "a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d" (UID: "a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.968662 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-scripts" (OuterVolumeSpecName: "scripts") pod "a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d" (UID: "a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.968683 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-kube-api-access-xhb67" (OuterVolumeSpecName: "kube-api-access-xhb67") pod "a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d" (UID: "a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d"). InnerVolumeSpecName "kube-api-access-xhb67". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.969414 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e603bbb8-24d6-43aa-bd0e-0039d8abc8e2-kube-api-access-lvrkd" (OuterVolumeSpecName: "kube-api-access-lvrkd") pod "e603bbb8-24d6-43aa-bd0e-0039d8abc8e2" (UID: "e603bbb8-24d6-43aa-bd0e-0039d8abc8e2"). InnerVolumeSpecName "kube-api-access-lvrkd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.989474 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d" (UID: "a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:40:42 crc kubenswrapper[4857]: I1128 13:40:42.991128 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e603bbb8-24d6-43aa-bd0e-0039d8abc8e2-config" (OuterVolumeSpecName: "config") pod "e603bbb8-24d6-43aa-bd0e-0039d8abc8e2" (UID: "e603bbb8-24d6-43aa-bd0e-0039d8abc8e2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.015660 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e603bbb8-24d6-43aa-bd0e-0039d8abc8e2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e603bbb8-24d6-43aa-bd0e-0039d8abc8e2" (UID: "e603bbb8-24d6-43aa-bd0e-0039d8abc8e2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.016492 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-config-data" (OuterVolumeSpecName: "config-data") pod "a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d" (UID: "a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.031796 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.066979 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.067028 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/e603bbb8-24d6-43aa-bd0e-0039d8abc8e2-config\") on node \"crc\" DevicePath \"\""
Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.067040 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xhb67\" (UniqueName: \"kubernetes.io/projected/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-kube-api-access-xhb67\") on node \"crc\" DevicePath \"\""
Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.067055 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.067065 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.067076 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d-logs\") on node \"crc\" DevicePath \"\""
Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.067085 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lvrkd\" (UniqueName: \"kubernetes.io/projected/e603bbb8-24d6-43aa-bd0e-0039d8abc8e2-kube-api-access-lvrkd\") on node \"crc\" DevicePath \"\""
Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.067096 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e603bbb8-24d6-43aa-bd0e-0039d8abc8e2-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.168176 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7c84546-0f82-49f5-b373-58eb6ead7939-config-data\") pod \"c7c84546-0f82-49f5-b373-58eb6ead7939\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") "
Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.168245 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"c7c84546-0f82-49f5-b373-58eb6ead7939\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") "
Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.168272 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7c84546-0f82-49f5-b373-58eb6ead7939-scripts\") pod \"c7c84546-0f82-49f5-b373-58eb6ead7939\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") "
Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.168333 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7c84546-0f82-49f5-b373-58eb6ead7939-combined-ca-bundle\") pod \"c7c84546-0f82-49f5-b373-58eb6ead7939\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") "
Nov 28
13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.168428 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c7c84546-0f82-49f5-b373-58eb6ead7939-httpd-run\") pod \"c7c84546-0f82-49f5-b373-58eb6ead7939\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.168474 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mvv9s\" (UniqueName: \"kubernetes.io/projected/c7c84546-0f82-49f5-b373-58eb6ead7939-kube-api-access-mvv9s\") pod \"c7c84546-0f82-49f5-b373-58eb6ead7939\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.168514 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7c84546-0f82-49f5-b373-58eb6ead7939-logs\") pod \"c7c84546-0f82-49f5-b373-58eb6ead7939\" (UID: \"c7c84546-0f82-49f5-b373-58eb6ead7939\") " Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.170264 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7c84546-0f82-49f5-b373-58eb6ead7939-logs" (OuterVolumeSpecName: "logs") pod "c7c84546-0f82-49f5-b373-58eb6ead7939" (UID: "c7c84546-0f82-49f5-b373-58eb6ead7939"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.172819 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7c84546-0f82-49f5-b373-58eb6ead7939-scripts" (OuterVolumeSpecName: "scripts") pod "c7c84546-0f82-49f5-b373-58eb6ead7939" (UID: "c7c84546-0f82-49f5-b373-58eb6ead7939"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.173161 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7c84546-0f82-49f5-b373-58eb6ead7939-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "c7c84546-0f82-49f5-b373-58eb6ead7939" (UID: "c7c84546-0f82-49f5-b373-58eb6ead7939"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.174317 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "glance") pod "c7c84546-0f82-49f5-b373-58eb6ead7939" (UID: "c7c84546-0f82-49f5-b373-58eb6ead7939"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.176629 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7c84546-0f82-49f5-b373-58eb6ead7939-kube-api-access-mvv9s" (OuterVolumeSpecName: "kube-api-access-mvv9s") pod "c7c84546-0f82-49f5-b373-58eb6ead7939" (UID: "c7c84546-0f82-49f5-b373-58eb6ead7939"). InnerVolumeSpecName "kube-api-access-mvv9s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.204024 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7c84546-0f82-49f5-b373-58eb6ead7939-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c7c84546-0f82-49f5-b373-58eb6ead7939" (UID: "c7c84546-0f82-49f5-b373-58eb6ead7939"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.228983 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7c84546-0f82-49f5-b373-58eb6ead7939-config-data" (OuterVolumeSpecName: "config-data") pod "c7c84546-0f82-49f5-b373-58eb6ead7939" (UID: "c7c84546-0f82-49f5-b373-58eb6ead7939"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.271554 4857 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c7c84546-0f82-49f5-b373-58eb6ead7939-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.271590 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mvv9s\" (UniqueName: \"kubernetes.io/projected/c7c84546-0f82-49f5-b373-58eb6ead7939-kube-api-access-mvv9s\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.271606 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7c84546-0f82-49f5-b373-58eb6ead7939-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.271616 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7c84546-0f82-49f5-b373-58eb6ead7939-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.271673 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.271688 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7c84546-0f82-49f5-b373-58eb6ead7939-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.271699 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7c84546-0f82-49f5-b373-58eb6ead7939-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.285632 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-46vxl" event={"ID":"e603bbb8-24d6-43aa-bd0e-0039d8abc8e2","Type":"ContainerDied","Data":"2c49d4010da0b265dc6443ec1cc0bac74e91be487dbdf984881b7bcf0cb3c3ff"} Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.285677 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2c49d4010da0b265dc6443ec1cc0bac74e91be487dbdf984881b7bcf0cb3c3ff" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.285738 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-46vxl" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.289301 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-qrf4k" event={"ID":"a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d","Type":"ContainerDied","Data":"1d3a26da999455a7aae8160140baaf862e44d1a36c2d9eecef58dc5c0d7e7f3a"} Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.289340 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1d3a26da999455a7aae8160140baaf862e44d1a36c2d9eecef58dc5c0d7e7f3a" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.289392 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-qrf4k" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.293356 4857 generic.go:334] "Generic (PLEG): container finished" podID="c7c84546-0f82-49f5-b373-58eb6ead7939" containerID="4d84cdd67c7b9f9ffce893f5cc215dc8a62a5a3769a4deb481b7b9da49bd8328" exitCode=0 Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.293403 4857 generic.go:334] "Generic (PLEG): container finished" podID="c7c84546-0f82-49f5-b373-58eb6ead7939" containerID="7a5195067a244a982b0bd02b88a4005109465de1b7807e494f20b75b989d8ef5" exitCode=143 Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.293405 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.294065 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c7c84546-0f82-49f5-b373-58eb6ead7939","Type":"ContainerDied","Data":"4d84cdd67c7b9f9ffce893f5cc215dc8a62a5a3769a4deb481b7b9da49bd8328"} Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.294108 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c7c84546-0f82-49f5-b373-58eb6ead7939","Type":"ContainerDied","Data":"7a5195067a244a982b0bd02b88a4005109465de1b7807e494f20b75b989d8ef5"} Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.294123 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c7c84546-0f82-49f5-b373-58eb6ead7939","Type":"ContainerDied","Data":"682032d79cb6e650298964663ecf39afb33b6b993fa9381c637569c77a0df319"} Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.294141 4857 scope.go:117] "RemoveContainer" containerID="4d84cdd67c7b9f9ffce893f5cc215dc8a62a5a3769a4deb481b7b9da49bd8328" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.307050 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.374026 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.384696 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.399577 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.412610 4857 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/glance-default-internal-api-0"] Nov 28 13:40:43 crc kubenswrapper[4857]: E1128 13:40:43.439268 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7c84546-0f82-49f5-b373-58eb6ead7939" containerName="glance-httpd" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.439299 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7c84546-0f82-49f5-b373-58eb6ead7939" containerName="glance-httpd" Nov 28 13:40:43 crc kubenswrapper[4857]: E1128 13:40:43.439322 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d" containerName="placement-db-sync" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.439331 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d" containerName="placement-db-sync" Nov 28 13:40:43 crc kubenswrapper[4857]: E1128 13:40:43.439357 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e603bbb8-24d6-43aa-bd0e-0039d8abc8e2" containerName="neutron-db-sync" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.439365 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e603bbb8-24d6-43aa-bd0e-0039d8abc8e2" containerName="neutron-db-sync" Nov 28 13:40:43 crc kubenswrapper[4857]: E1128 13:40:43.439382 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7c84546-0f82-49f5-b373-58eb6ead7939" containerName="glance-log" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.439389 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7c84546-0f82-49f5-b373-58eb6ead7939" containerName="glance-log" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.439615 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7c84546-0f82-49f5-b373-58eb6ead7939" containerName="glance-log" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.439635 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d" containerName="placement-db-sync" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.439654 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e603bbb8-24d6-43aa-bd0e-0039d8abc8e2" containerName="neutron-db-sync" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.439671 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7c84546-0f82-49f5-b373-58eb6ead7939" containerName="glance-httpd" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.440646 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.440745 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.447351 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.447572 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.448688 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-749fd8cf96-rbd6r"] Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.450693 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.458700 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.459010 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.459221 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.459373 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.459617 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-jffcr" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.481693 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-749fd8cf96-rbd6r"] Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.577776 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.577829 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/946c0669-4c99-46b7-a9ff-437042383642-logs\") pod \"placement-749fd8cf96-rbd6r\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.577858 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-config-data\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.577893 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/aa5c6527-63ae-4b20-b497-8b7abe609110-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.577968 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-scripts\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.578009 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmdmm\" (UniqueName: \"kubernetes.io/projected/aa5c6527-63ae-4b20-b497-8b7abe609110-kube-api-access-mmdmm\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 
13:40:43.578054 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.578077 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-internal-tls-certs\") pod \"placement-749fd8cf96-rbd6r\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.578102 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-config-data\") pod \"placement-749fd8cf96-rbd6r\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.578127 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-public-tls-certs\") pod \"placement-749fd8cf96-rbd6r\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.578160 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-scripts\") pod \"placement-749fd8cf96-rbd6r\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.578192 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-combined-ca-bundle\") pod \"placement-749fd8cf96-rbd6r\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.578257 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5lbwb\" (UniqueName: \"kubernetes.io/projected/946c0669-4c99-46b7-a9ff-437042383642-kube-api-access-5lbwb\") pod \"placement-749fd8cf96-rbd6r\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.578299 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aa5c6527-63ae-4b20-b497-8b7abe609110-logs\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.578325 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " 
pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.586927 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.616805 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-xt2qc"] Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.617065 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" podUID="2b7a5ae9-9951-4c40-9f14-18db9ef9084a" containerName="dnsmasq-dns" containerID="cri-o://59947e48b393a942777a6a3175b36ea339cd93ce33c07162c6e9cd5791ad24cb" gracePeriod=10 Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.665605 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5b54986f64-dxw54"] Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.675262 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5b54986f64-dxw54" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.679657 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-scripts\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.679707 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmdmm\" (UniqueName: \"kubernetes.io/projected/aa5c6527-63ae-4b20-b497-8b7abe609110-kube-api-access-mmdmm\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.679737 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.679779 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-internal-tls-certs\") pod \"placement-749fd8cf96-rbd6r\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.679805 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-config-data\") pod \"placement-749fd8cf96-rbd6r\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.679828 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-public-tls-certs\") pod \"placement-749fd8cf96-rbd6r\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.679851 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-scripts\") pod \"placement-749fd8cf96-rbd6r\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.679877 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-combined-ca-bundle\") pod \"placement-749fd8cf96-rbd6r\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.679910 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5lbwb\" (UniqueName: \"kubernetes.io/projected/946c0669-4c99-46b7-a9ff-437042383642-kube-api-access-5lbwb\") pod \"placement-749fd8cf96-rbd6r\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.679935 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aa5c6527-63ae-4b20-b497-8b7abe609110-logs\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.679950 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.679985 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.680008 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/946c0669-4c99-46b7-a9ff-437042383642-logs\") pod \"placement-749fd8cf96-rbd6r\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.680026 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-config-data\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.680049 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/aa5c6527-63ae-4b20-b497-8b7abe609110-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.680538 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/aa5c6527-63ae-4b20-b497-8b7abe609110-httpd-run\") pod \"glance-default-internal-api-0\" (UID: 
\"aa5c6527-63ae-4b20-b497-8b7abe609110\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.686143 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-scripts\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.687001 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.687155 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.687249 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-96hqp" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.687346 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.687534 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/946c0669-4c99-46b7-a9ff-437042383642-logs\") pod \"placement-749fd8cf96-rbd6r\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.688618 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aa5c6527-63ae-4b20-b497-8b7abe609110-logs\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.688974 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.690174 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-combined-ca-bundle\") pod \"placement-749fd8cf96-rbd6r\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.693549 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-config-data\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.694095 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-scripts\") pod \"placement-749fd8cf96-rbd6r\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.694350 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.695399 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.696411 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-public-tls-certs\") pod \"placement-749fd8cf96-rbd6r\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.706494 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-internal-tls-certs\") pod \"placement-749fd8cf96-rbd6r\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.707542 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-6c6sw"] Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.745310 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmdmm\" (UniqueName: \"kubernetes.io/projected/aa5c6527-63ae-4b20-b497-8b7abe609110-kube-api-access-mmdmm\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.745880 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5lbwb\" (UniqueName: \"kubernetes.io/projected/946c0669-4c99-46b7-a9ff-437042383642-kube-api-access-5lbwb\") pod \"placement-749fd8cf96-rbd6r\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.746530 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-config-data\") pod \"placement-749fd8cf96-rbd6r\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.746847 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.784919 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vwn48\" (UniqueName: \"kubernetes.io/projected/3fcb638a-dab8-414e-9d24-e49c8437672d-kube-api-access-vwn48\") pod \"neutron-5b54986f64-dxw54\" (UID: \"3fcb638a-dab8-414e-9d24-e49c8437672d\") " pod="openstack/neutron-5b54986f64-dxw54" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.784995 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ncd4v\" (UniqueName: \"kubernetes.io/projected/e96f8a95-2dec-4981-876b-869966a67b72-kube-api-access-ncd4v\") pod \"dnsmasq-dns-6b7b667979-6c6sw\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") " pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.785023 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-httpd-config\") pod \"neutron-5b54986f64-dxw54\" (UID: \"3fcb638a-dab8-414e-9d24-e49c8437672d\") " pod="openstack/neutron-5b54986f64-dxw54" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.785063 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-combined-ca-bundle\") pod \"neutron-5b54986f64-dxw54\" (UID: \"3fcb638a-dab8-414e-9d24-e49c8437672d\") " pod="openstack/neutron-5b54986f64-dxw54" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.785092 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-6c6sw\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") " pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.785440 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-6c6sw\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") " pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.785474 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-dns-svc\") pod \"dnsmasq-dns-6b7b667979-6c6sw\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") " pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.785525 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-config\") pod \"neutron-5b54986f64-dxw54\" (UID: \"3fcb638a-dab8-414e-9d24-e49c8437672d\") " pod="openstack/neutron-5b54986f64-dxw54" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.785570 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-config\") pod \"dnsmasq-dns-6b7b667979-6c6sw\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") " pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.785595 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-6c6sw\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") " pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.785620 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-ovndb-tls-certs\") pod \"neutron-5b54986f64-dxw54\" (UID: \"3fcb638a-dab8-414e-9d24-e49c8437672d\") " pod="openstack/neutron-5b54986f64-dxw54" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.791074 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.792449 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.795317 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5b54986f64-dxw54"] Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.810471 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-6c6sw"] Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.891677 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-6c6sw\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") " pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.892048 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-dns-svc\") pod \"dnsmasq-dns-6b7b667979-6c6sw\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") " pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.892099 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-config\") pod \"neutron-5b54986f64-dxw54\" (UID: \"3fcb638a-dab8-414e-9d24-e49c8437672d\") " pod="openstack/neutron-5b54986f64-dxw54" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.892135 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-config\") pod \"dnsmasq-dns-6b7b667979-6c6sw\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") " pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.892151 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-6c6sw\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") " pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.892177 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-ovndb-tls-certs\") pod \"neutron-5b54986f64-dxw54\" (UID: \"3fcb638a-dab8-414e-9d24-e49c8437672d\") " pod="openstack/neutron-5b54986f64-dxw54" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.892223 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vwn48\" (UniqueName: \"kubernetes.io/projected/3fcb638a-dab8-414e-9d24-e49c8437672d-kube-api-access-vwn48\") pod \"neutron-5b54986f64-dxw54\" (UID: \"3fcb638a-dab8-414e-9d24-e49c8437672d\") " pod="openstack/neutron-5b54986f64-dxw54" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.892260 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ncd4v\" (UniqueName: \"kubernetes.io/projected/e96f8a95-2dec-4981-876b-869966a67b72-kube-api-access-ncd4v\") pod \"dnsmasq-dns-6b7b667979-6c6sw\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") " pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.892276 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-httpd-config\") pod \"neutron-5b54986f64-dxw54\" (UID: \"3fcb638a-dab8-414e-9d24-e49c8437672d\") " pod="openstack/neutron-5b54986f64-dxw54" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.892309 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-combined-ca-bundle\") pod \"neutron-5b54986f64-dxw54\" (UID: \"3fcb638a-dab8-414e-9d24-e49c8437672d\") " pod="openstack/neutron-5b54986f64-dxw54" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.892329 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-6c6sw\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") " pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.893131 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-6c6sw\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") " pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.893214 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-6c6sw\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") " pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.896943 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-dns-svc\") pod \"dnsmasq-dns-6b7b667979-6c6sw\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") " pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.897484 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-config\") pod \"dnsmasq-dns-6b7b667979-6c6sw\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") " pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.897891 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-httpd-config\") pod \"neutron-5b54986f64-dxw54\" (UID: \"3fcb638a-dab8-414e-9d24-e49c8437672d\") " pod="openstack/neutron-5b54986f64-dxw54" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.898094 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-6c6sw\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") " pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.903642 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-combined-ca-bundle\") pod \"neutron-5b54986f64-dxw54\" (UID: \"3fcb638a-dab8-414e-9d24-e49c8437672d\") " pod="openstack/neutron-5b54986f64-dxw54" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.905494 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-ovndb-tls-certs\") pod \"neutron-5b54986f64-dxw54\" (UID: \"3fcb638a-dab8-414e-9d24-e49c8437672d\") " pod="openstack/neutron-5b54986f64-dxw54" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.910308 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-config\") pod \"neutron-5b54986f64-dxw54\" (UID: \"3fcb638a-dab8-414e-9d24-e49c8437672d\") " pod="openstack/neutron-5b54986f64-dxw54" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.916805 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vwn48\" (UniqueName: \"kubernetes.io/projected/3fcb638a-dab8-414e-9d24-e49c8437672d-kube-api-access-vwn48\") pod \"neutron-5b54986f64-dxw54\" (UID: \"3fcb638a-dab8-414e-9d24-e49c8437672d\") " pod="openstack/neutron-5b54986f64-dxw54" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.917170 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ncd4v\" (UniqueName: \"kubernetes.io/projected/e96f8a95-2dec-4981-876b-869966a67b72-kube-api-access-ncd4v\") pod \"dnsmasq-dns-6b7b667979-6c6sw\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") " pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.928306 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5b54986f64-dxw54" Nov 28 13:40:43 crc kubenswrapper[4857]: I1128 13:40:43.939225 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" Nov 28 13:40:44 crc kubenswrapper[4857]: I1128 13:40:44.077628 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 13:40:44 crc kubenswrapper[4857]: I1128 13:40:44.321481 4857 generic.go:334] "Generic (PLEG): container finished" podID="2b7a5ae9-9951-4c40-9f14-18db9ef9084a" containerID="59947e48b393a942777a6a3175b36ea339cd93ce33c07162c6e9cd5791ad24cb" exitCode=0 Nov 28 13:40:44 crc kubenswrapper[4857]: I1128 13:40:44.326915 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a62040a-8596-401c-a157-ac001544fae8" path="/var/lib/kubelet/pods/3a62040a-8596-401c-a157-ac001544fae8/volumes" Nov 28 13:40:44 crc kubenswrapper[4857]: I1128 13:40:44.327937 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7c84546-0f82-49f5-b373-58eb6ead7939" path="/var/lib/kubelet/pods/c7c84546-0f82-49f5-b373-58eb6ead7939/volumes" Nov 28 13:40:44 crc kubenswrapper[4857]: I1128 13:40:44.328512 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" event={"ID":"2b7a5ae9-9951-4c40-9f14-18db9ef9084a","Type":"ContainerDied","Data":"59947e48b393a942777a6a3175b36ea339cd93ce33c07162c6e9cd5791ad24cb"} Nov 28 13:40:45 crc kubenswrapper[4857]: I1128 13:40:45.327459 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-g2shl" Nov 28 13:40:45 crc kubenswrapper[4857]: I1128 13:40:45.332683 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-g2shl" event={"ID":"96846d9c-1949-4655-be98-006b4e5dd154","Type":"ContainerDied","Data":"d37f4d2803f4687208580e072995a86cacf58b914680ef2eeab4a14b8c609ba7"} Nov 28 13:40:45 crc kubenswrapper[4857]: I1128 13:40:45.332721 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d37f4d2803f4687208580e072995a86cacf58b914680ef2eeab4a14b8c609ba7" Nov 28 13:40:45 crc kubenswrapper[4857]: I1128 13:40:45.332724 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-g2shl" Nov 28 13:40:45 crc kubenswrapper[4857]: I1128 13:40:45.427650 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-combined-ca-bundle\") pod \"96846d9c-1949-4655-be98-006b4e5dd154\" (UID: \"96846d9c-1949-4655-be98-006b4e5dd154\") " Nov 28 13:40:45 crc kubenswrapper[4857]: I1128 13:40:45.427708 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pf8dv\" (UniqueName: \"kubernetes.io/projected/96846d9c-1949-4655-be98-006b4e5dd154-kube-api-access-pf8dv\") pod \"96846d9c-1949-4655-be98-006b4e5dd154\" (UID: \"96846d9c-1949-4655-be98-006b4e5dd154\") " Nov 28 13:40:45 crc kubenswrapper[4857]: I1128 13:40:45.427811 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-credential-keys\") pod \"96846d9c-1949-4655-be98-006b4e5dd154\" (UID: \"96846d9c-1949-4655-be98-006b4e5dd154\") " Nov 28 13:40:45 crc kubenswrapper[4857]: I1128 13:40:45.427863 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-scripts\") pod \"96846d9c-1949-4655-be98-006b4e5dd154\" (UID: \"96846d9c-1949-4655-be98-006b4e5dd154\") " Nov 28 13:40:45 crc kubenswrapper[4857]: I1128 13:40:45.427990 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-config-data\") pod \"96846d9c-1949-4655-be98-006b4e5dd154\" (UID: \"96846d9c-1949-4655-be98-006b4e5dd154\") " Nov 28 13:40:45 crc kubenswrapper[4857]: I1128 13:40:45.428074 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-fernet-keys\") pod \"96846d9c-1949-4655-be98-006b4e5dd154\" (UID: \"96846d9c-1949-4655-be98-006b4e5dd154\") " Nov 28 13:40:45 crc kubenswrapper[4857]: I1128 13:40:45.462614 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "96846d9c-1949-4655-be98-006b4e5dd154" (UID: "96846d9c-1949-4655-be98-006b4e5dd154"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:40:45 crc kubenswrapper[4857]: I1128 13:40:45.462972 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "96846d9c-1949-4655-be98-006b4e5dd154" (UID: "96846d9c-1949-4655-be98-006b4e5dd154"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:40:45 crc kubenswrapper[4857]: I1128 13:40:45.463125 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-scripts" (OuterVolumeSpecName: "scripts") pod "96846d9c-1949-4655-be98-006b4e5dd154" (UID: "96846d9c-1949-4655-be98-006b4e5dd154"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:40:45 crc kubenswrapper[4857]: I1128 13:40:45.490491 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96846d9c-1949-4655-be98-006b4e5dd154-kube-api-access-pf8dv" (OuterVolumeSpecName: "kube-api-access-pf8dv") pod "96846d9c-1949-4655-be98-006b4e5dd154" (UID: "96846d9c-1949-4655-be98-006b4e5dd154"). InnerVolumeSpecName "kube-api-access-pf8dv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:40:45 crc kubenswrapper[4857]: I1128 13:40:45.522519 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-config-data" (OuterVolumeSpecName: "config-data") pod "96846d9c-1949-4655-be98-006b4e5dd154" (UID: "96846d9c-1949-4655-be98-006b4e5dd154"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:40:45 crc kubenswrapper[4857]: I1128 13:40:45.524117 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "96846d9c-1949-4655-be98-006b4e5dd154" (UID: "96846d9c-1949-4655-be98-006b4e5dd154"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:40:45 crc kubenswrapper[4857]: I1128 13:40:45.530112 4857 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:45 crc kubenswrapper[4857]: I1128 13:40:45.530161 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:45 crc kubenswrapper[4857]: I1128 13:40:45.530173 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:45 crc kubenswrapper[4857]: I1128 13:40:45.530185 4857 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:45 crc kubenswrapper[4857]: I1128 13:40:45.530196 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96846d9c-1949-4655-be98-006b4e5dd154-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:45 crc kubenswrapper[4857]: I1128 13:40:45.530209 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pf8dv\" (UniqueName: \"kubernetes.io/projected/96846d9c-1949-4655-be98-006b4e5dd154-kube-api-access-pf8dv\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.106945 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7d4894d65-gqnvs"] Nov 28 13:40:46 crc kubenswrapper[4857]: E1128 13:40:46.107675 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96846d9c-1949-4655-be98-006b4e5dd154" containerName="keystone-bootstrap" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.107700 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="96846d9c-1949-4655-be98-006b4e5dd154" containerName="keystone-bootstrap" Nov 28 
13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.107952 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="96846d9c-1949-4655-be98-006b4e5dd154" containerName="keystone-bootstrap" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.109027 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.111082 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.111251 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.131936 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7d4894d65-gqnvs"] Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.251915 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-ovndb-tls-certs\") pod \"neutron-7d4894d65-gqnvs\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.252003 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-config\") pod \"neutron-7d4894d65-gqnvs\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.252033 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-public-tls-certs\") pod \"neutron-7d4894d65-gqnvs\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.252065 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-combined-ca-bundle\") pod \"neutron-7d4894d65-gqnvs\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.252085 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-httpd-config\") pod \"neutron-7d4894d65-gqnvs\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.252216 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-internal-tls-certs\") pod \"neutron-7d4894d65-gqnvs\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.252341 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-849t6\" (UniqueName: \"kubernetes.io/projected/151aff2f-7aaa-4964-8f75-51c8faf86397-kube-api-access-849t6\") pod 
\"neutron-7d4894d65-gqnvs\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.353675 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-internal-tls-certs\") pod \"neutron-7d4894d65-gqnvs\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.353744 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-849t6\" (UniqueName: \"kubernetes.io/projected/151aff2f-7aaa-4964-8f75-51c8faf86397-kube-api-access-849t6\") pod \"neutron-7d4894d65-gqnvs\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.353801 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-ovndb-tls-certs\") pod \"neutron-7d4894d65-gqnvs\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.353858 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-config\") pod \"neutron-7d4894d65-gqnvs\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.353881 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-public-tls-certs\") pod \"neutron-7d4894d65-gqnvs\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.353910 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-combined-ca-bundle\") pod \"neutron-7d4894d65-gqnvs\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.353926 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-httpd-config\") pod \"neutron-7d4894d65-gqnvs\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.364888 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-config\") pod \"neutron-7d4894d65-gqnvs\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.365024 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-httpd-config\") pod \"neutron-7d4894d65-gqnvs\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.365813 4857 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-combined-ca-bundle\") pod \"neutron-7d4894d65-gqnvs\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.367231 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-public-tls-certs\") pod \"neutron-7d4894d65-gqnvs\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.368447 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-internal-tls-certs\") pod \"neutron-7d4894d65-gqnvs\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.372463 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-ovndb-tls-certs\") pod \"neutron-7d4894d65-gqnvs\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.374083 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-849t6\" (UniqueName: \"kubernetes.io/projected/151aff2f-7aaa-4964-8f75-51c8faf86397-kube-api-access-849t6\") pod \"neutron-7d4894d65-gqnvs\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.425740 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.469538 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-5f4cb87f5f-m76pk"] Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.470892 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.473095 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.473327 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.473521 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.473917 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-bzgqn" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.474001 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.481693 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.486433 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5f4cb87f5f-m76pk"] Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.556973 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-internal-tls-certs\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.557290 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-config-data\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.557544 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-public-tls-certs\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.557686 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hh962\" (UniqueName: \"kubernetes.io/projected/adfd05de-d1db-45d3-aea1-b35dc0110b71-kube-api-access-hh962\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.557895 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-scripts\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.558034 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-combined-ca-bundle\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: 
\"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.558183 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-fernet-keys\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.558366 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-credential-keys\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.660656 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-internal-tls-certs\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.660943 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-config-data\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.661700 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-public-tls-certs\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.661868 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hh962\" (UniqueName: \"kubernetes.io/projected/adfd05de-d1db-45d3-aea1-b35dc0110b71-kube-api-access-hh962\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.662422 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-scripts\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.662592 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-combined-ca-bundle\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.662797 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-fernet-keys\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 
28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.662955 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-credential-keys\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.665427 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-config-data\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.666179 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-combined-ca-bundle\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.666241 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-scripts\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.666440 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-credential-keys\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.666440 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-internal-tls-certs\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.666525 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-public-tls-certs\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.682866 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hh962\" (UniqueName: \"kubernetes.io/projected/adfd05de-d1db-45d3-aea1-b35dc0110b71-kube-api-access-hh962\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.682878 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-fernet-keys\") pod \"keystone-5f4cb87f5f-m76pk\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:46 crc kubenswrapper[4857]: I1128 13:40:46.796857 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:47 crc kubenswrapper[4857]: I1128 13:40:47.862563 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" podUID="2b7a5ae9-9951-4c40-9f14-18db9ef9084a" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.144:5353: connect: connection refused" Nov 28 13:40:48 crc kubenswrapper[4857]: W1128 13:40:48.051514 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod87c314fb_4c6d_4182_bd6f_a0b8bf66ecaa.slice/crio-90a1d135eac8123f28e076e4f4eb9271cae9df227a1da250c9a71664dd80a384 WatchSource:0}: Error finding container 90a1d135eac8123f28e076e4f4eb9271cae9df227a1da250c9a71664dd80a384: Status 404 returned error can't find the container with id 90a1d135eac8123f28e076e4f4eb9271cae9df227a1da250c9a71664dd80a384 Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.105903 4857 scope.go:117] "RemoveContainer" containerID="7a5195067a244a982b0bd02b88a4005109465de1b7807e494f20b75b989d8ef5" Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.304520 4857 scope.go:117] "RemoveContainer" containerID="4d84cdd67c7b9f9ffce893f5cc215dc8a62a5a3769a4deb481b7b9da49bd8328" Nov 28 13:40:48 crc kubenswrapper[4857]: E1128 13:40:48.305414 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d84cdd67c7b9f9ffce893f5cc215dc8a62a5a3769a4deb481b7b9da49bd8328\": container with ID starting with 4d84cdd67c7b9f9ffce893f5cc215dc8a62a5a3769a4deb481b7b9da49bd8328 not found: ID does not exist" containerID="4d84cdd67c7b9f9ffce893f5cc215dc8a62a5a3769a4deb481b7b9da49bd8328" Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.305454 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d84cdd67c7b9f9ffce893f5cc215dc8a62a5a3769a4deb481b7b9da49bd8328"} err="failed to get container status \"4d84cdd67c7b9f9ffce893f5cc215dc8a62a5a3769a4deb481b7b9da49bd8328\": rpc error: code = NotFound desc = could not find container \"4d84cdd67c7b9f9ffce893f5cc215dc8a62a5a3769a4deb481b7b9da49bd8328\": container with ID starting with 4d84cdd67c7b9f9ffce893f5cc215dc8a62a5a3769a4deb481b7b9da49bd8328 not found: ID does not exist" Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.305493 4857 scope.go:117] "RemoveContainer" containerID="7a5195067a244a982b0bd02b88a4005109465de1b7807e494f20b75b989d8ef5" Nov 28 13:40:48 crc kubenswrapper[4857]: E1128 13:40:48.305952 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a5195067a244a982b0bd02b88a4005109465de1b7807e494f20b75b989d8ef5\": container with ID starting with 7a5195067a244a982b0bd02b88a4005109465de1b7807e494f20b75b989d8ef5 not found: ID does not exist" containerID="7a5195067a244a982b0bd02b88a4005109465de1b7807e494f20b75b989d8ef5" Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.305999 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a5195067a244a982b0bd02b88a4005109465de1b7807e494f20b75b989d8ef5"} err="failed to get container status \"7a5195067a244a982b0bd02b88a4005109465de1b7807e494f20b75b989d8ef5\": rpc error: code = NotFound desc = could not find container \"7a5195067a244a982b0bd02b88a4005109465de1b7807e494f20b75b989d8ef5\": container with ID starting with 
7a5195067a244a982b0bd02b88a4005109465de1b7807e494f20b75b989d8ef5 not found: ID does not exist" Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.306015 4857 scope.go:117] "RemoveContainer" containerID="4d84cdd67c7b9f9ffce893f5cc215dc8a62a5a3769a4deb481b7b9da49bd8328" Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.306420 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d84cdd67c7b9f9ffce893f5cc215dc8a62a5a3769a4deb481b7b9da49bd8328"} err="failed to get container status \"4d84cdd67c7b9f9ffce893f5cc215dc8a62a5a3769a4deb481b7b9da49bd8328\": rpc error: code = NotFound desc = could not find container \"4d84cdd67c7b9f9ffce893f5cc215dc8a62a5a3769a4deb481b7b9da49bd8328\": container with ID starting with 4d84cdd67c7b9f9ffce893f5cc215dc8a62a5a3769a4deb481b7b9da49bd8328 not found: ID does not exist" Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.306437 4857 scope.go:117] "RemoveContainer" containerID="7a5195067a244a982b0bd02b88a4005109465de1b7807e494f20b75b989d8ef5" Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.307081 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a5195067a244a982b0bd02b88a4005109465de1b7807e494f20b75b989d8ef5"} err="failed to get container status \"7a5195067a244a982b0bd02b88a4005109465de1b7807e494f20b75b989d8ef5\": rpc error: code = NotFound desc = could not find container \"7a5195067a244a982b0bd02b88a4005109465de1b7807e494f20b75b989d8ef5\": container with ID starting with 7a5195067a244a982b0bd02b88a4005109465de1b7807e494f20b75b989d8ef5 not found: ID does not exist" Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.377587 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa","Type":"ContainerStarted","Data":"90a1d135eac8123f28e076e4f4eb9271cae9df227a1da250c9a71664dd80a384"} Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.515627 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.597394 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ddmwk\" (UniqueName: \"kubernetes.io/projected/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-kube-api-access-ddmwk\") pod \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.597721 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-ovsdbserver-sb\") pod \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.597803 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-dns-svc\") pod \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.597840 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-ovsdbserver-nb\") pod \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.597891 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-dns-swift-storage-0\") pod \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.597976 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-config\") pod \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\" (UID: \"2b7a5ae9-9951-4c40-9f14-18db9ef9084a\") " Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.615927 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-kube-api-access-ddmwk" (OuterVolumeSpecName: "kube-api-access-ddmwk") pod "2b7a5ae9-9951-4c40-9f14-18db9ef9084a" (UID: "2b7a5ae9-9951-4c40-9f14-18db9ef9084a"). InnerVolumeSpecName "kube-api-access-ddmwk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.650006 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-config" (OuterVolumeSpecName: "config") pod "2b7a5ae9-9951-4c40-9f14-18db9ef9084a" (UID: "2b7a5ae9-9951-4c40-9f14-18db9ef9084a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.671259 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "2b7a5ae9-9951-4c40-9f14-18db9ef9084a" (UID: "2b7a5ae9-9951-4c40-9f14-18db9ef9084a"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.681253 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2b7a5ae9-9951-4c40-9f14-18db9ef9084a" (UID: "2b7a5ae9-9951-4c40-9f14-18db9ef9084a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.695491 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2b7a5ae9-9951-4c40-9f14-18db9ef9084a" (UID: "2b7a5ae9-9951-4c40-9f14-18db9ef9084a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.700792 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ddmwk\" (UniqueName: \"kubernetes.io/projected/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-kube-api-access-ddmwk\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.701136 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.701221 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.701274 4857 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.701335 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.702370 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2b7a5ae9-9951-4c40-9f14-18db9ef9084a" (UID: "2b7a5ae9-9951-4c40-9f14-18db9ef9084a"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.744792 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.801519 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-749fd8cf96-rbd6r"] Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.802493 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b7a5ae9-9951-4c40-9f14-18db9ef9084a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.871237 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5f4cb87f5f-m76pk"] Nov 28 13:40:48 crc kubenswrapper[4857]: I1128 13:40:48.885860 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-6c6sw"] Nov 28 13:40:48 crc kubenswrapper[4857]: W1128 13:40:48.942013 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podadfd05de_d1db_45d3_aea1_b35dc0110b71.slice/crio-8b9e52427797212791fc67ee21db92de7bbbcb3a3e285875fba5f906607faecb WatchSource:0}: Error finding container 8b9e52427797212791fc67ee21db92de7bbbcb3a3e285875fba5f906607faecb: Status 404 returned error can't find the container with id 8b9e52427797212791fc67ee21db92de7bbbcb3a3e285875fba5f906607faecb Nov 28 13:40:49 crc kubenswrapper[4857]: I1128 13:40:49.226279 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7d4894d65-gqnvs"] Nov 28 13:40:49 crc kubenswrapper[4857]: W1128 13:40:49.238460 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod151aff2f_7aaa_4964_8f75_51c8faf86397.slice/crio-142e02233648c3cfe3264c0b78da1d10fe0c212642b98af63993ac8735fc68e1 WatchSource:0}: Error finding container 142e02233648c3cfe3264c0b78da1d10fe0c212642b98af63993ac8735fc68e1: Status 404 returned error can't find the container with id 142e02233648c3cfe3264c0b78da1d10fe0c212642b98af63993ac8735fc68e1 Nov 28 13:40:49 crc kubenswrapper[4857]: W1128 13:40:49.281589 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3fcb638a_dab8_414e_9d24_e49c8437672d.slice/crio-8500d968792fa6ebcdfd06932333ee1050adfe957118738a54feed91470eda76 WatchSource:0}: Error finding container 8500d968792fa6ebcdfd06932333ee1050adfe957118738a54feed91470eda76: Status 404 returned error can't find the container with id 8500d968792fa6ebcdfd06932333ee1050adfe957118738a54feed91470eda76 Nov 28 13:40:49 crc kubenswrapper[4857]: I1128 13:40:49.282555 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5b54986f64-dxw54"] Nov 28 13:40:49 crc kubenswrapper[4857]: I1128 13:40:49.391830 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" event={"ID":"e96f8a95-2dec-4981-876b-869966a67b72","Type":"ContainerStarted","Data":"b45de0a46458e9f92807085970245ace4538255233f841beec59b789730b57f0"} Nov 28 13:40:49 crc kubenswrapper[4857]: I1128 13:40:49.396204 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dd1c47cb-0f7e-42f1-824b-a6cef692c751","Type":"ContainerStarted","Data":"45ab71fd172102a32778d57deb04ace9af8696a6011cee74dcc94ab1721f814a"} Nov 28 13:40:49 crc 
kubenswrapper[4857]: I1128 13:40:49.404223 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" event={"ID":"2b7a5ae9-9951-4c40-9f14-18db9ef9084a","Type":"ContainerDied","Data":"d80f9a2a2b9793b20b87e03187921bb5b261d021f5fd58e75c087700dd4e8d1c"} Nov 28 13:40:49 crc kubenswrapper[4857]: I1128 13:40:49.404395 4857 scope.go:117] "RemoveContainer" containerID="59947e48b393a942777a6a3175b36ea339cd93ce33c07162c6e9cd5791ad24cb" Nov 28 13:40:49 crc kubenswrapper[4857]: I1128 13:40:49.404577 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-xt2qc" Nov 28 13:40:49 crc kubenswrapper[4857]: I1128 13:40:49.407456 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5b54986f64-dxw54" event={"ID":"3fcb638a-dab8-414e-9d24-e49c8437672d","Type":"ContainerStarted","Data":"8500d968792fa6ebcdfd06932333ee1050adfe957118738a54feed91470eda76"} Nov 28 13:40:49 crc kubenswrapper[4857]: I1128 13:40:49.410509 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-749fd8cf96-rbd6r" event={"ID":"946c0669-4c99-46b7-a9ff-437042383642","Type":"ContainerStarted","Data":"28d378399a22b6465e842cbbbb1052a8ad7a94336fe8212837dfd391a9ef8de6"} Nov 28 13:40:49 crc kubenswrapper[4857]: I1128 13:40:49.416921 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7d4894d65-gqnvs" event={"ID":"151aff2f-7aaa-4964-8f75-51c8faf86397","Type":"ContainerStarted","Data":"142e02233648c3cfe3264c0b78da1d10fe0c212642b98af63993ac8735fc68e1"} Nov 28 13:40:49 crc kubenswrapper[4857]: I1128 13:40:49.419140 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5f4cb87f5f-m76pk" event={"ID":"adfd05de-d1db-45d3-aea1-b35dc0110b71","Type":"ContainerStarted","Data":"8b9e52427797212791fc67ee21db92de7bbbcb3a3e285875fba5f906607faecb"} Nov 28 13:40:49 crc kubenswrapper[4857]: I1128 13:40:49.420188 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"aa5c6527-63ae-4b20-b497-8b7abe609110","Type":"ContainerStarted","Data":"88f91a9d2294254074d637616c9d35e5a0d4dd1b1df56ac9858e363f1e7fb0b5"} Nov 28 13:40:49 crc kubenswrapper[4857]: I1128 13:40:49.454435 4857 scope.go:117] "RemoveContainer" containerID="7a5f00c3945d3734a4fd6a2de8380d88c0a0f781f2c72e162e35e213c6d0b74c" Nov 28 13:40:49 crc kubenswrapper[4857]: I1128 13:40:49.473634 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-xt2qc"] Nov 28 13:40:49 crc kubenswrapper[4857]: I1128 13:40:49.482135 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-xt2qc"] Nov 28 13:40:50 crc kubenswrapper[4857]: I1128 13:40:50.325286 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b7a5ae9-9951-4c40-9f14-18db9ef9084a" path="/var/lib/kubelet/pods/2b7a5ae9-9951-4c40-9f14-18db9ef9084a/volumes" Nov 28 13:40:50 crc kubenswrapper[4857]: I1128 13:40:50.443004 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"aa5c6527-63ae-4b20-b497-8b7abe609110","Type":"ContainerStarted","Data":"2e90c406f3ce5f6e243a58b3a523f5bbbd5c50f0ea05fb2946f569018ff927c0"} Nov 28 13:40:50 crc kubenswrapper[4857]: I1128 13:40:50.460734 4857 generic.go:334] "Generic (PLEG): container finished" podID="e96f8a95-2dec-4981-876b-869966a67b72" containerID="742670a443e33f75e66952e569ed4805604ac305cab57c22b54e2fa2ce2f0864" exitCode=0 
Nov 28 13:40:50 crc kubenswrapper[4857]: I1128 13:40:50.460893 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" event={"ID":"e96f8a95-2dec-4981-876b-869966a67b72","Type":"ContainerDied","Data":"742670a443e33f75e66952e569ed4805604ac305cab57c22b54e2fa2ce2f0864"} Nov 28 13:40:50 crc kubenswrapper[4857]: I1128 13:40:50.466736 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa","Type":"ContainerStarted","Data":"df9b47ff8198bdafa7125b2e7a080b861355bfe30230d7f8a3ec52fcee86264f"} Nov 28 13:40:50 crc kubenswrapper[4857]: I1128 13:40:50.471403 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5b54986f64-dxw54" event={"ID":"3fcb638a-dab8-414e-9d24-e49c8437672d","Type":"ContainerStarted","Data":"1b7fd768b948dcb6059f83665784c09b36fe6390b7629371594c8c3421176880"} Nov 28 13:40:50 crc kubenswrapper[4857]: I1128 13:40:50.471807 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5b54986f64-dxw54" event={"ID":"3fcb638a-dab8-414e-9d24-e49c8437672d","Type":"ContainerStarted","Data":"c859c94bc4ee582de35c29d3a653e1f9ca193e86a0cc31648fb667168c21e37b"} Nov 28 13:40:50 crc kubenswrapper[4857]: I1128 13:40:50.472632 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5b54986f64-dxw54" Nov 28 13:40:50 crc kubenswrapper[4857]: I1128 13:40:50.496770 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-749fd8cf96-rbd6r" event={"ID":"946c0669-4c99-46b7-a9ff-437042383642","Type":"ContainerStarted","Data":"4bdd0ee5b2dc8d0eba75e5970152f8cfe9df74f09930b295ed3cf6ddb62ac999"} Nov 28 13:40:50 crc kubenswrapper[4857]: I1128 13:40:50.496823 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-749fd8cf96-rbd6r" event={"ID":"946c0669-4c99-46b7-a9ff-437042383642","Type":"ContainerStarted","Data":"9f2936fe928f6000c1df2fe80515f9fd71cc2a258636283c70afbe2ab56dcf0b"} Nov 28 13:40:50 crc kubenswrapper[4857]: I1128 13:40:50.496868 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:50 crc kubenswrapper[4857]: I1128 13:40:50.496961 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:40:50 crc kubenswrapper[4857]: I1128 13:40:50.501694 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7d4894d65-gqnvs" event={"ID":"151aff2f-7aaa-4964-8f75-51c8faf86397","Type":"ContainerStarted","Data":"ec785d0624d75a82e22bd01f7edfc8b3b369f0fa8f2251c36725e8484e0c04f0"} Nov 28 13:40:50 crc kubenswrapper[4857]: I1128 13:40:50.501763 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7d4894d65-gqnvs" event={"ID":"151aff2f-7aaa-4964-8f75-51c8faf86397","Type":"ContainerStarted","Data":"a6df09e46a84bd7457d48eb96b91f30ec9076cb7712cd5e8e714009a5e5ee6d2"} Nov 28 13:40:50 crc kubenswrapper[4857]: I1128 13:40:50.502611 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:40:50 crc kubenswrapper[4857]: I1128 13:40:50.510360 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5b54986f64-dxw54" podStartSLOduration=7.510343441 podStartE2EDuration="7.510343441s" podCreationTimestamp="2025-11-28 13:40:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:40:50.502967607 +0000 UTC m=+1342.530342774" watchObservedRunningTime="2025-11-28 13:40:50.510343441 +0000 UTC m=+1342.537718618" Nov 28 13:40:50 crc kubenswrapper[4857]: I1128 13:40:50.510974 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5f4cb87f5f-m76pk" event={"ID":"adfd05de-d1db-45d3-aea1-b35dc0110b71","Type":"ContainerStarted","Data":"c6ba92e4d979c8b69f5fe686fd993ea41a62407d93386f6c437c35d1fe1b2018"} Nov 28 13:40:50 crc kubenswrapper[4857]: I1128 13:40:50.511239 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:40:50 crc kubenswrapper[4857]: I1128 13:40:50.540566 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7d4894d65-gqnvs" podStartSLOduration=4.540546065 podStartE2EDuration="4.540546065s" podCreationTimestamp="2025-11-28 13:40:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:40:50.527347763 +0000 UTC m=+1342.554722930" watchObservedRunningTime="2025-11-28 13:40:50.540546065 +0000 UTC m=+1342.567921242" Nov 28 13:40:50 crc kubenswrapper[4857]: I1128 13:40:50.586495 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-749fd8cf96-rbd6r" podStartSLOduration=7.586472054 podStartE2EDuration="7.586472054s" podCreationTimestamp="2025-11-28 13:40:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:40:50.554159369 +0000 UTC m=+1342.581534536" watchObservedRunningTime="2025-11-28 13:40:50.586472054 +0000 UTC m=+1342.613847221" Nov 28 13:40:50 crc kubenswrapper[4857]: I1128 13:40:50.610210 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-5f4cb87f5f-m76pk" podStartSLOduration=4.610189961 podStartE2EDuration="4.610189961s" podCreationTimestamp="2025-11-28 13:40:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:40:50.580044198 +0000 UTC m=+1342.607419365" watchObservedRunningTime="2025-11-28 13:40:50.610189961 +0000 UTC m=+1342.637565128" Nov 28 13:40:51 crc kubenswrapper[4857]: I1128 13:40:51.522188 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa","Type":"ContainerStarted","Data":"e54d67a38fbee3a524172c908eaa8f9b96b59b1bd613edfba5494e6dc6f75b3e"} Nov 28 13:40:52 crc kubenswrapper[4857]: I1128 13:40:52.535889 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-sp8xb" event={"ID":"779e7a51-657e-47ae-a068-3cd339cd9bb1","Type":"ContainerStarted","Data":"9a058d07523d9997875c4df9672d3c2618e01acbee6d4a401522e8f25cb2b82f"} Nov 28 13:40:53 crc kubenswrapper[4857]: I1128 13:40:53.545459 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"aa5c6527-63ae-4b20-b497-8b7abe609110","Type":"ContainerStarted","Data":"c065e75bcff4982d3c580553c3075a92f37176a3153b6c5c8ecbfbaecc74b5c5"} Nov 28 13:40:53 crc kubenswrapper[4857]: I1128 13:40:53.547869 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" 
event={"ID":"e96f8a95-2dec-4981-876b-869966a67b72","Type":"ContainerStarted","Data":"89da98a0c10b1faa48fafd6ba314782afe1ae3811d31a641ddb07be661fdbe5e"} Nov 28 13:40:53 crc kubenswrapper[4857]: I1128 13:40:53.547931 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" Nov 28 13:40:53 crc kubenswrapper[4857]: I1128 13:40:53.707819 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=10.707800202 podStartE2EDuration="10.707800202s" podCreationTimestamp="2025-11-28 13:40:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:40:53.68250724 +0000 UTC m=+1345.709882417" watchObservedRunningTime="2025-11-28 13:40:53.707800202 +0000 UTC m=+1345.735175369" Nov 28 13:40:53 crc kubenswrapper[4857]: I1128 13:40:53.707936 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-sp8xb" podStartSLOduration=5.404950523 podStartE2EDuration="39.707932826s" podCreationTimestamp="2025-11-28 13:40:14 +0000 UTC" firstStartedPulling="2025-11-28 13:40:15.6083313 +0000 UTC m=+1307.635706467" lastFinishedPulling="2025-11-28 13:40:49.911313603 +0000 UTC m=+1341.938688770" observedRunningTime="2025-11-28 13:40:53.69976727 +0000 UTC m=+1345.727142437" watchObservedRunningTime="2025-11-28 13:40:53.707932826 +0000 UTC m=+1345.735307993" Nov 28 13:40:53 crc kubenswrapper[4857]: I1128 13:40:53.727138 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=11.727121651000001 podStartE2EDuration="11.727121651s" podCreationTimestamp="2025-11-28 13:40:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:40:53.72120678 +0000 UTC m=+1345.748581957" watchObservedRunningTime="2025-11-28 13:40:53.727121651 +0000 UTC m=+1345.754496818" Nov 28 13:40:53 crc kubenswrapper[4857]: I1128 13:40:53.743841 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" podStartSLOduration=10.743815185 podStartE2EDuration="10.743815185s" podCreationTimestamp="2025-11-28 13:40:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:40:53.739881501 +0000 UTC m=+1345.767256678" watchObservedRunningTime="2025-11-28 13:40:53.743815185 +0000 UTC m=+1345.771190352" Nov 28 13:40:54 crc kubenswrapper[4857]: I1128 13:40:54.078353 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 13:40:54 crc kubenswrapper[4857]: I1128 13:40:54.078434 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 13:40:54 crc kubenswrapper[4857]: I1128 13:40:54.113422 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 13:40:54 crc kubenswrapper[4857]: I1128 13:40:54.122207 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 13:40:54 crc kubenswrapper[4857]: I1128 13:40:54.557935 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/glance-default-internal-api-0" Nov 28 13:40:54 crc kubenswrapper[4857]: I1128 13:40:54.558247 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 13:40:55 crc kubenswrapper[4857]: I1128 13:40:55.570123 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-g9jf5" event={"ID":"d7eee1cb-c5d6-45e4-a007-0d29935cd83a","Type":"ContainerStarted","Data":"43803a839e238f33736b9e11cf3a9902b5274095c543dca6a4d5d4938e97537d"} Nov 28 13:40:55 crc kubenswrapper[4857]: I1128 13:40:55.581192 4857 generic.go:334] "Generic (PLEG): container finished" podID="779e7a51-657e-47ae-a068-3cd339cd9bb1" containerID="9a058d07523d9997875c4df9672d3c2618e01acbee6d4a401522e8f25cb2b82f" exitCode=0 Nov 28 13:40:55 crc kubenswrapper[4857]: I1128 13:40:55.581277 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-sp8xb" event={"ID":"779e7a51-657e-47ae-a068-3cd339cd9bb1","Type":"ContainerDied","Data":"9a058d07523d9997875c4df9672d3c2618e01acbee6d4a401522e8f25cb2b82f"} Nov 28 13:40:55 crc kubenswrapper[4857]: I1128 13:40:55.603674 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-g9jf5" podStartSLOduration=2.71190237 podStartE2EDuration="41.603652183s" podCreationTimestamp="2025-11-28 13:40:14 +0000 UTC" firstStartedPulling="2025-11-28 13:40:15.150034666 +0000 UTC m=+1307.177409823" lastFinishedPulling="2025-11-28 13:40:54.041784469 +0000 UTC m=+1346.069159636" observedRunningTime="2025-11-28 13:40:55.600744649 +0000 UTC m=+1347.628119816" watchObservedRunningTime="2025-11-28 13:40:55.603652183 +0000 UTC m=+1347.631027350" Nov 28 13:40:56 crc kubenswrapper[4857]: I1128 13:40:56.625569 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 13:40:58 crc kubenswrapper[4857]: I1128 13:40:58.897639 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-sp8xb" Nov 28 13:40:58 crc kubenswrapper[4857]: I1128 13:40:58.943916 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" Nov 28 13:40:59 crc kubenswrapper[4857]: I1128 13:40:59.006253 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-kn9cb"] Nov 28 13:40:59 crc kubenswrapper[4857]: I1128 13:40:59.006541 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" podUID="992a1ed2-a6f4-44c2-9cb9-73857ba4d53a" containerName="dnsmasq-dns" containerID="cri-o://a25de7ed9c22fbbc2ed87a5ab2c3b5470a0250a9afdd348f295f0cfad5fe9cc9" gracePeriod=10 Nov 28 13:40:59 crc kubenswrapper[4857]: I1128 13:40:59.017465 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/779e7a51-657e-47ae-a068-3cd339cd9bb1-db-sync-config-data\") pod \"779e7a51-657e-47ae-a068-3cd339cd9bb1\" (UID: \"779e7a51-657e-47ae-a068-3cd339cd9bb1\") " Nov 28 13:40:59 crc kubenswrapper[4857]: I1128 13:40:59.017516 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l5bnm\" (UniqueName: \"kubernetes.io/projected/779e7a51-657e-47ae-a068-3cd339cd9bb1-kube-api-access-l5bnm\") pod \"779e7a51-657e-47ae-a068-3cd339cd9bb1\" (UID: \"779e7a51-657e-47ae-a068-3cd339cd9bb1\") " Nov 28 13:40:59 crc kubenswrapper[4857]: I1128 13:40:59.017576 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/779e7a51-657e-47ae-a068-3cd339cd9bb1-combined-ca-bundle\") pod \"779e7a51-657e-47ae-a068-3cd339cd9bb1\" (UID: \"779e7a51-657e-47ae-a068-3cd339cd9bb1\") " Nov 28 13:40:59 crc kubenswrapper[4857]: I1128 13:40:59.035823 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/779e7a51-657e-47ae-a068-3cd339cd9bb1-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "779e7a51-657e-47ae-a068-3cd339cd9bb1" (UID: "779e7a51-657e-47ae-a068-3cd339cd9bb1"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:40:59 crc kubenswrapper[4857]: I1128 13:40:59.036589 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/779e7a51-657e-47ae-a068-3cd339cd9bb1-kube-api-access-l5bnm" (OuterVolumeSpecName: "kube-api-access-l5bnm") pod "779e7a51-657e-47ae-a068-3cd339cd9bb1" (UID: "779e7a51-657e-47ae-a068-3cd339cd9bb1"). InnerVolumeSpecName "kube-api-access-l5bnm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:40:59 crc kubenswrapper[4857]: I1128 13:40:59.074013 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/779e7a51-657e-47ae-a068-3cd339cd9bb1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "779e7a51-657e-47ae-a068-3cd339cd9bb1" (UID: "779e7a51-657e-47ae-a068-3cd339cd9bb1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:40:59 crc kubenswrapper[4857]: I1128 13:40:59.119579 4857 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/779e7a51-657e-47ae-a068-3cd339cd9bb1-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:59 crc kubenswrapper[4857]: I1128 13:40:59.119611 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l5bnm\" (UniqueName: \"kubernetes.io/projected/779e7a51-657e-47ae-a068-3cd339cd9bb1-kube-api-access-l5bnm\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:59 crc kubenswrapper[4857]: I1128 13:40:59.119622 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/779e7a51-657e-47ae-a068-3cd339cd9bb1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:40:59 crc kubenswrapper[4857]: I1128 13:40:59.654693 4857 generic.go:334] "Generic (PLEG): container finished" podID="992a1ed2-a6f4-44c2-9cb9-73857ba4d53a" containerID="a25de7ed9c22fbbc2ed87a5ab2c3b5470a0250a9afdd348f295f0cfad5fe9cc9" exitCode=0 Nov 28 13:40:59 crc kubenswrapper[4857]: I1128 13:40:59.654788 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" event={"ID":"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a","Type":"ContainerDied","Data":"a25de7ed9c22fbbc2ed87a5ab2c3b5470a0250a9afdd348f295f0cfad5fe9cc9"} Nov 28 13:40:59 crc kubenswrapper[4857]: I1128 13:40:59.657289 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-sp8xb" event={"ID":"779e7a51-657e-47ae-a068-3cd339cd9bb1","Type":"ContainerDied","Data":"1dde1b328dd9aa7f64075ffb1b4459c3ad103546bdfd3bac9e98b01180538ef2"} Nov 28 13:40:59 crc kubenswrapper[4857]: I1128 13:40:59.657316 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1dde1b328dd9aa7f64075ffb1b4459c3ad103546bdfd3bac9e98b01180538ef2" Nov 28 13:40:59 crc kubenswrapper[4857]: I1128 13:40:59.657361 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-sp8xb" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.231030 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-56664b65dc-mkdgh"] Nov 28 13:41:00 crc kubenswrapper[4857]: E1128 13:41:00.231440 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b7a5ae9-9951-4c40-9f14-18db9ef9084a" containerName="init" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.231464 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b7a5ae9-9951-4c40-9f14-18db9ef9084a" containerName="init" Nov 28 13:41:00 crc kubenswrapper[4857]: E1128 13:41:00.231498 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="779e7a51-657e-47ae-a068-3cd339cd9bb1" containerName="barbican-db-sync" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.231505 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="779e7a51-657e-47ae-a068-3cd339cd9bb1" containerName="barbican-db-sync" Nov 28 13:41:00 crc kubenswrapper[4857]: E1128 13:41:00.231523 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b7a5ae9-9951-4c40-9f14-18db9ef9084a" containerName="dnsmasq-dns" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.231530 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b7a5ae9-9951-4c40-9f14-18db9ef9084a" containerName="dnsmasq-dns" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.231712 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="779e7a51-657e-47ae-a068-3cd339cd9bb1" containerName="barbican-db-sync" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.231731 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b7a5ae9-9951-4c40-9f14-18db9ef9084a" containerName="dnsmasq-dns" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.232632 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-56664b65dc-mkdgh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.237320 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.237503 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-rd8bj" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.237993 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.243399 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-9689bdb94-frvhg"] Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.244938 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f411fba7-d7b2-4d97-9388-c1b6f57e8328-combined-ca-bundle\") pod \"barbican-worker-56664b65dc-mkdgh\" (UID: \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\") " pod="openstack/barbican-worker-56664b65dc-mkdgh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.245044 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f411fba7-d7b2-4d97-9388-c1b6f57e8328-config-data-custom\") pod \"barbican-worker-56664b65dc-mkdgh\" (UID: \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\") " pod="openstack/barbican-worker-56664b65dc-mkdgh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.245071 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f411fba7-d7b2-4d97-9388-c1b6f57e8328-config-data\") pod \"barbican-worker-56664b65dc-mkdgh\" (UID: \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\") " pod="openstack/barbican-worker-56664b65dc-mkdgh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.245128 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f411fba7-d7b2-4d97-9388-c1b6f57e8328-logs\") pod \"barbican-worker-56664b65dc-mkdgh\" (UID: \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\") " pod="openstack/barbican-worker-56664b65dc-mkdgh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.245154 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t66xl\" (UniqueName: \"kubernetes.io/projected/f411fba7-d7b2-4d97-9388-c1b6f57e8328-kube-api-access-t66xl\") pod \"barbican-worker-56664b65dc-mkdgh\" (UID: \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\") " pod="openstack/barbican-worker-56664b65dc-mkdgh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.245218 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.247673 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.309103 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-9689bdb94-frvhg"] Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.332820 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-vlrhh"] Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.334739 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.347290 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f411fba7-d7b2-4d97-9388-c1b6f57e8328-combined-ca-bundle\") pod \"barbican-worker-56664b65dc-mkdgh\" (UID: \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\") " pod="openstack/barbican-worker-56664b65dc-mkdgh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.347342 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d2e145c-5068-4dff-a35e-14fe385cdcf2-config-data-custom\") pod \"barbican-keystone-listener-9689bdb94-frvhg\" (UID: \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\") " pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.347441 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d2e145c-5068-4dff-a35e-14fe385cdcf2-config-data\") pod \"barbican-keystone-listener-9689bdb94-frvhg\" (UID: \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\") " pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.347465 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d2e145c-5068-4dff-a35e-14fe385cdcf2-logs\") pod \"barbican-keystone-listener-9689bdb94-frvhg\" (UID: \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\") " pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.347501 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f411fba7-d7b2-4d97-9388-c1b6f57e8328-config-data-custom\") pod \"barbican-worker-56664b65dc-mkdgh\" (UID: \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\") " pod="openstack/barbican-worker-56664b65dc-mkdgh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.347522 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f411fba7-d7b2-4d97-9388-c1b6f57e8328-config-data\") pod \"barbican-worker-56664b65dc-mkdgh\" (UID: \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\") " pod="openstack/barbican-worker-56664b65dc-mkdgh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.347559 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zsgzm\" (UniqueName: 
\"kubernetes.io/projected/e578310f-afa3-4d86-8115-98baf09ff7ef-kube-api-access-zsgzm\") pod \"dnsmasq-dns-848cf88cfc-vlrhh\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.347595 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-vlrhh\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.347620 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-vlrhh\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.347660 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d2e145c-5068-4dff-a35e-14fe385cdcf2-combined-ca-bundle\") pod \"barbican-keystone-listener-9689bdb94-frvhg\" (UID: \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\") " pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.347687 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v64tr\" (UniqueName: \"kubernetes.io/projected/0d2e145c-5068-4dff-a35e-14fe385cdcf2-kube-api-access-v64tr\") pod \"barbican-keystone-listener-9689bdb94-frvhg\" (UID: \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\") " pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.347704 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-vlrhh\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.347724 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f411fba7-d7b2-4d97-9388-c1b6f57e8328-logs\") pod \"barbican-worker-56664b65dc-mkdgh\" (UID: \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\") " pod="openstack/barbican-worker-56664b65dc-mkdgh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.347764 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-config\") pod \"dnsmasq-dns-848cf88cfc-vlrhh\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.347804 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t66xl\" (UniqueName: \"kubernetes.io/projected/f411fba7-d7b2-4d97-9388-c1b6f57e8328-kube-api-access-t66xl\") pod \"barbican-worker-56664b65dc-mkdgh\" (UID: \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\") " pod="openstack/barbican-worker-56664b65dc-mkdgh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 
13:41:00.347852 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-vlrhh\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.365371 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f411fba7-d7b2-4d97-9388-c1b6f57e8328-combined-ca-bundle\") pod \"barbican-worker-56664b65dc-mkdgh\" (UID: \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\") " pod="openstack/barbican-worker-56664b65dc-mkdgh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.368231 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f411fba7-d7b2-4d97-9388-c1b6f57e8328-config-data-custom\") pod \"barbican-worker-56664b65dc-mkdgh\" (UID: \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\") " pod="openstack/barbican-worker-56664b65dc-mkdgh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.373250 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f411fba7-d7b2-4d97-9388-c1b6f57e8328-logs\") pod \"barbican-worker-56664b65dc-mkdgh\" (UID: \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\") " pod="openstack/barbican-worker-56664b65dc-mkdgh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.381305 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f411fba7-d7b2-4d97-9388-c1b6f57e8328-config-data\") pod \"barbican-worker-56664b65dc-mkdgh\" (UID: \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\") " pod="openstack/barbican-worker-56664b65dc-mkdgh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.397810 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-vlrhh"] Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.401263 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t66xl\" (UniqueName: \"kubernetes.io/projected/f411fba7-d7b2-4d97-9388-c1b6f57e8328-kube-api-access-t66xl\") pod \"barbican-worker-56664b65dc-mkdgh\" (UID: \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\") " pod="openstack/barbican-worker-56664b65dc-mkdgh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.455076 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-56664b65dc-mkdgh"] Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.466556 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zsgzm\" (UniqueName: \"kubernetes.io/projected/e578310f-afa3-4d86-8115-98baf09ff7ef-kube-api-access-zsgzm\") pod \"dnsmasq-dns-848cf88cfc-vlrhh\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.466654 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-vlrhh\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.466695 4857 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-vlrhh\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.466766 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d2e145c-5068-4dff-a35e-14fe385cdcf2-combined-ca-bundle\") pod \"barbican-keystone-listener-9689bdb94-frvhg\" (UID: \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\") " pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.466795 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v64tr\" (UniqueName: \"kubernetes.io/projected/0d2e145c-5068-4dff-a35e-14fe385cdcf2-kube-api-access-v64tr\") pod \"barbican-keystone-listener-9689bdb94-frvhg\" (UID: \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\") " pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.466821 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-vlrhh\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.466870 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-config\") pod \"dnsmasq-dns-848cf88cfc-vlrhh\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.466953 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-vlrhh\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.467045 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d2e145c-5068-4dff-a35e-14fe385cdcf2-config-data-custom\") pod \"barbican-keystone-listener-9689bdb94-frvhg\" (UID: \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\") " pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.467187 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d2e145c-5068-4dff-a35e-14fe385cdcf2-config-data\") pod \"barbican-keystone-listener-9689bdb94-frvhg\" (UID: \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\") " pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.467214 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d2e145c-5068-4dff-a35e-14fe385cdcf2-logs\") pod \"barbican-keystone-listener-9689bdb94-frvhg\" (UID: \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\") " pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 
13:41:00.470109 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-vlrhh\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.471058 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-vlrhh\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.471837 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-vlrhh\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.475939 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d2e145c-5068-4dff-a35e-14fe385cdcf2-combined-ca-bundle\") pod \"barbican-keystone-listener-9689bdb94-frvhg\" (UID: \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\") " pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.478504 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-config\") pod \"dnsmasq-dns-848cf88cfc-vlrhh\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.479576 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-vlrhh\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.480038 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d2e145c-5068-4dff-a35e-14fe385cdcf2-logs\") pod \"barbican-keystone-listener-9689bdb94-frvhg\" (UID: \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\") " pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.500628 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5f5cc64f6b-5tvl6"] Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.503436 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5f5cc64f6b-5tvl6" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.504695 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zsgzm\" (UniqueName: \"kubernetes.io/projected/e578310f-afa3-4d86-8115-98baf09ff7ef-kube-api-access-zsgzm\") pod \"dnsmasq-dns-848cf88cfc-vlrhh\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.507101 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d2e145c-5068-4dff-a35e-14fe385cdcf2-config-data\") pod \"barbican-keystone-listener-9689bdb94-frvhg\" (UID: \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\") " pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.511256 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v64tr\" (UniqueName: \"kubernetes.io/projected/0d2e145c-5068-4dff-a35e-14fe385cdcf2-kube-api-access-v64tr\") pod \"barbican-keystone-listener-9689bdb94-frvhg\" (UID: \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\") " pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.511776 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d2e145c-5068-4dff-a35e-14fe385cdcf2-config-data-custom\") pod \"barbican-keystone-listener-9689bdb94-frvhg\" (UID: \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\") " pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.511880 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.549853 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5f5cc64f6b-5tvl6"] Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.570581 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-56664b65dc-mkdgh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.593891 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.599699 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.669620 4857 generic.go:334] "Generic (PLEG): container finished" podID="d7eee1cb-c5d6-45e4-a007-0d29935cd83a" containerID="43803a839e238f33736b9e11cf3a9902b5274095c543dca6a4d5d4938e97537d" exitCode=0 Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.669952 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-g9jf5" event={"ID":"d7eee1cb-c5d6-45e4-a007-0d29935cd83a","Type":"ContainerDied","Data":"43803a839e238f33736b9e11cf3a9902b5274095c543dca6a4d5d4938e97537d"} Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.675191 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2r7l4\" (UniqueName: \"kubernetes.io/projected/fa95429a-5622-4bcb-a065-8ff916c55bb9-kube-api-access-2r7l4\") pod \"barbican-api-5f5cc64f6b-5tvl6\" (UID: \"fa95429a-5622-4bcb-a065-8ff916c55bb9\") " pod="openstack/barbican-api-5f5cc64f6b-5tvl6" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.675253 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa95429a-5622-4bcb-a065-8ff916c55bb9-combined-ca-bundle\") pod \"barbican-api-5f5cc64f6b-5tvl6\" (UID: \"fa95429a-5622-4bcb-a065-8ff916c55bb9\") " pod="openstack/barbican-api-5f5cc64f6b-5tvl6" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.675294 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fa95429a-5622-4bcb-a065-8ff916c55bb9-logs\") pod \"barbican-api-5f5cc64f6b-5tvl6\" (UID: \"fa95429a-5622-4bcb-a065-8ff916c55bb9\") " pod="openstack/barbican-api-5f5cc64f6b-5tvl6" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.675344 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fa95429a-5622-4bcb-a065-8ff916c55bb9-config-data-custom\") pod \"barbican-api-5f5cc64f6b-5tvl6\" (UID: \"fa95429a-5622-4bcb-a065-8ff916c55bb9\") " pod="openstack/barbican-api-5f5cc64f6b-5tvl6" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.675423 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa95429a-5622-4bcb-a065-8ff916c55bb9-config-data\") pod \"barbican-api-5f5cc64f6b-5tvl6\" (UID: \"fa95429a-5622-4bcb-a065-8ff916c55bb9\") " pod="openstack/barbican-api-5f5cc64f6b-5tvl6" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.776949 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa95429a-5622-4bcb-a065-8ff916c55bb9-config-data\") pod \"barbican-api-5f5cc64f6b-5tvl6\" (UID: \"fa95429a-5622-4bcb-a065-8ff916c55bb9\") " pod="openstack/barbican-api-5f5cc64f6b-5tvl6" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.777057 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2r7l4\" (UniqueName: \"kubernetes.io/projected/fa95429a-5622-4bcb-a065-8ff916c55bb9-kube-api-access-2r7l4\") pod \"barbican-api-5f5cc64f6b-5tvl6\" (UID: \"fa95429a-5622-4bcb-a065-8ff916c55bb9\") " pod="openstack/barbican-api-5f5cc64f6b-5tvl6" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.777097 4857 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa95429a-5622-4bcb-a065-8ff916c55bb9-combined-ca-bundle\") pod \"barbican-api-5f5cc64f6b-5tvl6\" (UID: \"fa95429a-5622-4bcb-a065-8ff916c55bb9\") " pod="openstack/barbican-api-5f5cc64f6b-5tvl6" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.777115 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fa95429a-5622-4bcb-a065-8ff916c55bb9-logs\") pod \"barbican-api-5f5cc64f6b-5tvl6\" (UID: \"fa95429a-5622-4bcb-a065-8ff916c55bb9\") " pod="openstack/barbican-api-5f5cc64f6b-5tvl6" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.777147 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fa95429a-5622-4bcb-a065-8ff916c55bb9-config-data-custom\") pod \"barbican-api-5f5cc64f6b-5tvl6\" (UID: \"fa95429a-5622-4bcb-a065-8ff916c55bb9\") " pod="openstack/barbican-api-5f5cc64f6b-5tvl6" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.778353 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fa95429a-5622-4bcb-a065-8ff916c55bb9-logs\") pod \"barbican-api-5f5cc64f6b-5tvl6\" (UID: \"fa95429a-5622-4bcb-a065-8ff916c55bb9\") " pod="openstack/barbican-api-5f5cc64f6b-5tvl6" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.783267 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa95429a-5622-4bcb-a065-8ff916c55bb9-config-data\") pod \"barbican-api-5f5cc64f6b-5tvl6\" (UID: \"fa95429a-5622-4bcb-a065-8ff916c55bb9\") " pod="openstack/barbican-api-5f5cc64f6b-5tvl6" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.784544 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fa95429a-5622-4bcb-a065-8ff916c55bb9-config-data-custom\") pod \"barbican-api-5f5cc64f6b-5tvl6\" (UID: \"fa95429a-5622-4bcb-a065-8ff916c55bb9\") " pod="openstack/barbican-api-5f5cc64f6b-5tvl6" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.786386 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa95429a-5622-4bcb-a065-8ff916c55bb9-combined-ca-bundle\") pod \"barbican-api-5f5cc64f6b-5tvl6\" (UID: \"fa95429a-5622-4bcb-a065-8ff916c55bb9\") " pod="openstack/barbican-api-5f5cc64f6b-5tvl6" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.799257 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2r7l4\" (UniqueName: \"kubernetes.io/projected/fa95429a-5622-4bcb-a065-8ff916c55bb9-kube-api-access-2r7l4\") pod \"barbican-api-5f5cc64f6b-5tvl6\" (UID: \"fa95429a-5622-4bcb-a065-8ff916c55bb9\") " pod="openstack/barbican-api-5f5cc64f6b-5tvl6" Nov 28 13:41:00 crc kubenswrapper[4857]: I1128 13:41:00.917895 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5f5cc64f6b-5tvl6" Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.457055 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.601882 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-dns-svc\") pod \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.601917 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ps8vt\" (UniqueName: \"kubernetes.io/projected/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-kube-api-access-ps8vt\") pod \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.601951 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-ovsdbserver-sb\") pod \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.601973 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-config\") pod \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.602060 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-ovsdbserver-nb\") pod \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.602184 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-dns-swift-storage-0\") pod \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\" (UID: \"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a\") " Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.629604 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-kube-api-access-ps8vt" (OuterVolumeSpecName: "kube-api-access-ps8vt") pod "992a1ed2-a6f4-44c2-9cb9-73857ba4d53a" (UID: "992a1ed2-a6f4-44c2-9cb9-73857ba4d53a"). InnerVolumeSpecName "kube-api-access-ps8vt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.661081 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "992a1ed2-a6f4-44c2-9cb9-73857ba4d53a" (UID: "992a1ed2-a6f4-44c2-9cb9-73857ba4d53a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.665696 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "992a1ed2-a6f4-44c2-9cb9-73857ba4d53a" (UID: "992a1ed2-a6f4-44c2-9cb9-73857ba4d53a"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.702511 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-config" (OuterVolumeSpecName: "config") pod "992a1ed2-a6f4-44c2-9cb9-73857ba4d53a" (UID: "992a1ed2-a6f4-44c2-9cb9-73857ba4d53a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.711369 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.711449 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ps8vt\" (UniqueName: \"kubernetes.io/projected/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-kube-api-access-ps8vt\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.711459 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.711468 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.716466 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="dd1c47cb-0f7e-42f1-824b-a6cef692c751" containerName="ceilometer-central-agent" containerID="cri-o://d64f59fc0d63d8fdddaf62677a40dab269073c3700fca581921792c2837fe785" gracePeriod=30 Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.716554 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dd1c47cb-0f7e-42f1-824b-a6cef692c751","Type":"ContainerStarted","Data":"994efca9eff08e2894a93e0654ccdda0445a5967b2b3b92b4ffb7d9c1232d363"} Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.716591 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.716852 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="dd1c47cb-0f7e-42f1-824b-a6cef692c751" containerName="proxy-httpd" containerID="cri-o://994efca9eff08e2894a93e0654ccdda0445a5967b2b3b92b4ffb7d9c1232d363" gracePeriod=30 Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.716900 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="dd1c47cb-0f7e-42f1-824b-a6cef692c751" containerName="sg-core" containerID="cri-o://45ab71fd172102a32778d57deb04ace9af8696a6011cee74dcc94ab1721f814a" gracePeriod=30 Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.716933 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="dd1c47cb-0f7e-42f1-824b-a6cef692c751" containerName="ceilometer-notification-agent" containerID="cri-o://67d7fc4f5095e379062283d9a025c1c36226cc34ff139a5a7da965bb1e6c4b4a" gracePeriod=30 Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.717585 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "992a1ed2-a6f4-44c2-9cb9-73857ba4d53a" (UID: "992a1ed2-a6f4-44c2-9cb9-73857ba4d53a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.727888 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.727963 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-kn9cb" event={"ID":"992a1ed2-a6f4-44c2-9cb9-73857ba4d53a","Type":"ContainerDied","Data":"d1c3b9b287bc84a3c9dc9bd5fcaac4ad1add17a6f5d6ca9dcc088bbba16e5936"} Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.727998 4857 scope.go:117] "RemoveContainer" containerID="a25de7ed9c22fbbc2ed87a5ab2c3b5470a0250a9afdd348f295f0cfad5fe9cc9" Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.729690 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-56664b65dc-mkdgh"] Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.739531 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "992a1ed2-a6f4-44c2-9cb9-73857ba4d53a" (UID: "992a1ed2-a6f4-44c2-9cb9-73857ba4d53a"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.749527 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.9128770739999998 podStartE2EDuration="47.749507997s" podCreationTimestamp="2025-11-28 13:40:14 +0000 UTC" firstStartedPulling="2025-11-28 13:40:15.341650482 +0000 UTC m=+1307.369025649" lastFinishedPulling="2025-11-28 13:41:01.178281415 +0000 UTC m=+1353.205656572" observedRunningTime="2025-11-28 13:41:01.745127071 +0000 UTC m=+1353.772502238" watchObservedRunningTime="2025-11-28 13:41:01.749507997 +0000 UTC m=+1353.776883164" Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.751149 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-9689bdb94-frvhg"] Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.820778 4857 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.820813 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.863185 4857 scope.go:117] "RemoveContainer" containerID="91ef46368263fb9e62d46672b1da81143900e54aa73f82abdc30a98f231042d2" Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.904001 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5f5cc64f6b-5tvl6"] Nov 28 13:41:01 crc kubenswrapper[4857]: I1128 13:41:01.919579 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-vlrhh"] Nov 28 13:41:01 crc kubenswrapper[4857]: W1128 13:41:01.964855 4857 manager.go:1169] Failed to 
process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa95429a_5622_4bcb_a065_8ff916c55bb9.slice/crio-e1aa5f085e6e2d5ae825822812e4278d0e2224c7030e07384c5bd34e402ad2b8 WatchSource:0}: Error finding container e1aa5f085e6e2d5ae825822812e4278d0e2224c7030e07384c5bd34e402ad2b8: Status 404 returned error can't find the container with id e1aa5f085e6e2d5ae825822812e4278d0e2224c7030e07384c5bd34e402ad2b8 Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.071646 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-kn9cb"] Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.083800 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-kn9cb"] Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.133521 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-g9jf5" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.231885 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-config-data\") pod \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.232678 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-db-sync-config-data\") pod \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.232838 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-etc-machine-id\") pod \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.232930 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h72dj\" (UniqueName: \"kubernetes.io/projected/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-kube-api-access-h72dj\") pod \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.233030 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-combined-ca-bundle\") pod \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.233314 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-scripts\") pod \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\" (UID: \"d7eee1cb-c5d6-45e4-a007-0d29935cd83a\") " Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.233650 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "d7eee1cb-c5d6-45e4-a007-0d29935cd83a" (UID: "d7eee1cb-c5d6-45e4-a007-0d29935cd83a"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.236994 4857 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.237659 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-kube-api-access-h72dj" (OuterVolumeSpecName: "kube-api-access-h72dj") pod "d7eee1cb-c5d6-45e4-a007-0d29935cd83a" (UID: "d7eee1cb-c5d6-45e4-a007-0d29935cd83a"). InnerVolumeSpecName "kube-api-access-h72dj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.248853 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-scripts" (OuterVolumeSpecName: "scripts") pod "d7eee1cb-c5d6-45e4-a007-0d29935cd83a" (UID: "d7eee1cb-c5d6-45e4-a007-0d29935cd83a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.248899 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "d7eee1cb-c5d6-45e4-a007-0d29935cd83a" (UID: "d7eee1cb-c5d6-45e4-a007-0d29935cd83a"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.265957 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d7eee1cb-c5d6-45e4-a007-0d29935cd83a" (UID: "d7eee1cb-c5d6-45e4-a007-0d29935cd83a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.316932 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-config-data" (OuterVolumeSpecName: "config-data") pod "d7eee1cb-c5d6-45e4-a007-0d29935cd83a" (UID: "d7eee1cb-c5d6-45e4-a007-0d29935cd83a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.335661 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="992a1ed2-a6f4-44c2-9cb9-73857ba4d53a" path="/var/lib/kubelet/pods/992a1ed2-a6f4-44c2-9cb9-73857ba4d53a/volumes" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.338624 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h72dj\" (UniqueName: \"kubernetes.io/projected/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-kube-api-access-h72dj\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.338646 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.338656 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.338665 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.338673 4857 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d7eee1cb-c5d6-45e4-a007-0d29935cd83a-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.741290 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-56664b65dc-mkdgh" event={"ID":"f411fba7-d7b2-4d97-9388-c1b6f57e8328","Type":"ContainerStarted","Data":"19d4a2ab4802b975ae7e0927266d7ad3cf0970d05ff5693a288b543337eaba1d"} Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.754471 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-g9jf5" event={"ID":"d7eee1cb-c5d6-45e4-a007-0d29935cd83a","Type":"ContainerDied","Data":"94ad7b5192c1cf2720a9a41e7e9875b0b4e3d8db356ff52e4e74b8cd0af516b7"} Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.754688 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="94ad7b5192c1cf2720a9a41e7e9875b0b4e3d8db356ff52e4e74b8cd0af516b7" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.754480 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-g9jf5" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.761204 4857 generic.go:334] "Generic (PLEG): container finished" podID="dd1c47cb-0f7e-42f1-824b-a6cef692c751" containerID="994efca9eff08e2894a93e0654ccdda0445a5967b2b3b92b4ffb7d9c1232d363" exitCode=0 Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.761235 4857 generic.go:334] "Generic (PLEG): container finished" podID="dd1c47cb-0f7e-42f1-824b-a6cef692c751" containerID="45ab71fd172102a32778d57deb04ace9af8696a6011cee74dcc94ab1721f814a" exitCode=2 Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.761245 4857 generic.go:334] "Generic (PLEG): container finished" podID="dd1c47cb-0f7e-42f1-824b-a6cef692c751" containerID="d64f59fc0d63d8fdddaf62677a40dab269073c3700fca581921792c2837fe785" exitCode=0 Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.761281 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dd1c47cb-0f7e-42f1-824b-a6cef692c751","Type":"ContainerDied","Data":"994efca9eff08e2894a93e0654ccdda0445a5967b2b3b92b4ffb7d9c1232d363"} Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.761330 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dd1c47cb-0f7e-42f1-824b-a6cef692c751","Type":"ContainerDied","Data":"45ab71fd172102a32778d57deb04ace9af8696a6011cee74dcc94ab1721f814a"} Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.761343 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dd1c47cb-0f7e-42f1-824b-a6cef692c751","Type":"ContainerDied","Data":"d64f59fc0d63d8fdddaf62677a40dab269073c3700fca581921792c2837fe785"} Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.768391 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" event={"ID":"0d2e145c-5068-4dff-a35e-14fe385cdcf2","Type":"ContainerStarted","Data":"66bd52cc70500c43fcbe9f5c5ac33ce420cc24212dd17f80d02d735ec7519d64"} Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.769872 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5f5cc64f6b-5tvl6" event={"ID":"fa95429a-5622-4bcb-a065-8ff916c55bb9","Type":"ContainerStarted","Data":"4afcfea2a87b26b9b4261f7f29aa83d6eaf1b3dba1540e3bfbfdd796e9be994d"} Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.769901 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5f5cc64f6b-5tvl6" event={"ID":"fa95429a-5622-4bcb-a065-8ff916c55bb9","Type":"ContainerStarted","Data":"596175288a06a7a15c708b2eee248bdbe809f62ad2e84d79070f3281ec4e96d0"} Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.769911 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5f5cc64f6b-5tvl6" event={"ID":"fa95429a-5622-4bcb-a065-8ff916c55bb9","Type":"ContainerStarted","Data":"e1aa5f085e6e2d5ae825822812e4278d0e2224c7030e07384c5bd34e402ad2b8"} Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.770050 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5f5cc64f6b-5tvl6" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.770210 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5f5cc64f6b-5tvl6" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.772217 4857 generic.go:334] "Generic (PLEG): container finished" podID="e578310f-afa3-4d86-8115-98baf09ff7ef" 
containerID="86d25a119789f5851728048126e00c909bedd7599605d6b8a559c1c2c8a73470" exitCode=0 Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.772256 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" event={"ID":"e578310f-afa3-4d86-8115-98baf09ff7ef","Type":"ContainerDied","Data":"86d25a119789f5851728048126e00c909bedd7599605d6b8a559c1c2c8a73470"} Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.772277 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" event={"ID":"e578310f-afa3-4d86-8115-98baf09ff7ef","Type":"ContainerStarted","Data":"c7741781090b9971cde3e32cbb2b5b70815882262a85effbfb38bc53be473590"} Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.791093 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5f5cc64f6b-5tvl6" podStartSLOduration=2.791076803 podStartE2EDuration="2.791076803s" podCreationTimestamp="2025-11-28 13:41:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:41:02.78615191 +0000 UTC m=+1354.813527077" watchObservedRunningTime="2025-11-28 13:41:02.791076803 +0000 UTC m=+1354.818451970" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.803443 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.803489 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.838909 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.851778 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.935060 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 13:41:02 crc kubenswrapper[4857]: E1128 13:41:02.935855 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7eee1cb-c5d6-45e4-a007-0d29935cd83a" containerName="cinder-db-sync" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.935872 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7eee1cb-c5d6-45e4-a007-0d29935cd83a" containerName="cinder-db-sync" Nov 28 13:41:02 crc kubenswrapper[4857]: E1128 13:41:02.935920 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="992a1ed2-a6f4-44c2-9cb9-73857ba4d53a" containerName="init" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.935928 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="992a1ed2-a6f4-44c2-9cb9-73857ba4d53a" containerName="init" Nov 28 13:41:02 crc kubenswrapper[4857]: E1128 13:41:02.935945 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="992a1ed2-a6f4-44c2-9cb9-73857ba4d53a" containerName="dnsmasq-dns" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.935951 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="992a1ed2-a6f4-44c2-9cb9-73857ba4d53a" containerName="dnsmasq-dns" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.936308 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="992a1ed2-a6f4-44c2-9cb9-73857ba4d53a" containerName="dnsmasq-dns" Nov 28 13:41:02 crc 
kubenswrapper[4857]: I1128 13:41:02.936335 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7eee1cb-c5d6-45e4-a007-0d29935cd83a" containerName="cinder-db-sync" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.939207 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.945000 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-c29m2" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.945338 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.945406 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.945469 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 28 13:41:02 crc kubenswrapper[4857]: I1128 13:41:02.980335 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.036827 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-vlrhh"] Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.063297 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") " pod="openstack/cinder-scheduler-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.063380 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") " pod="openstack/cinder-scheduler-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.063422 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-config-data\") pod \"cinder-scheduler-0\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") " pod="openstack/cinder-scheduler-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.063469 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-scripts\") pod \"cinder-scheduler-0\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") " pod="openstack/cinder-scheduler-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.063524 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6t9xv\" (UniqueName: \"kubernetes.io/projected/607af733-25c0-43f4-8be8-328836908ca1-kube-api-access-6t9xv\") pod \"cinder-scheduler-0\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") " pod="openstack/cinder-scheduler-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.063547 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/607af733-25c0-43f4-8be8-328836908ca1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") " pod="openstack/cinder-scheduler-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.081039 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-gcnr8"] Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.082712 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.120255 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-gcnr8"] Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.166711 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") " pod="openstack/cinder-scheduler-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.170320 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.171833 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.172766 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-gcnr8\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") " pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.172811 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snc64\" (UniqueName: \"kubernetes.io/projected/a6e77d16-341a-4f84-8427-82712eb6541f-kube-api-access-snc64\") pod \"dnsmasq-dns-6578955fd5-gcnr8\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") " pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.172873 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") " pod="openstack/cinder-scheduler-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.172926 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-config-data\") pod \"cinder-scheduler-0\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") " pod="openstack/cinder-scheduler-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.173029 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-scripts\") pod \"cinder-scheduler-0\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") " pod="openstack/cinder-scheduler-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.173047 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-gcnr8\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") " pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.173088 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-gcnr8\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") " pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.173105 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-dns-svc\") pod \"dnsmasq-dns-6578955fd5-gcnr8\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") " pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.173177 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6t9xv\" (UniqueName: \"kubernetes.io/projected/607af733-25c0-43f4-8be8-328836908ca1-kube-api-access-6t9xv\") pod \"cinder-scheduler-0\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") " pod="openstack/cinder-scheduler-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.173213 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-config\") pod \"dnsmasq-dns-6578955fd5-gcnr8\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") " pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.173229 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/607af733-25c0-43f4-8be8-328836908ca1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") " pod="openstack/cinder-scheduler-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.173314 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/607af733-25c0-43f4-8be8-328836908ca1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") " pod="openstack/cinder-scheduler-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.175345 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.179267 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.179321 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-scripts\") pod \"cinder-scheduler-0\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") " pod="openstack/cinder-scheduler-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.179314 4857 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.179410 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.180038 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e3860c9cd9dfa55680c98f69cece6eff0f08ced38d345f3573b02bd062397f7a"} pod="openshift-machine-config-operator/machine-config-daemon-jdgls" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.180081 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" containerID="cri-o://e3860c9cd9dfa55680c98f69cece6eff0f08ced38d345f3573b02bd062397f7a" gracePeriod=600 Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.182379 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") " pod="openstack/cinder-scheduler-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.187006 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.204583 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6t9xv\" (UniqueName: \"kubernetes.io/projected/607af733-25c0-43f4-8be8-328836908ca1-kube-api-access-6t9xv\") pod \"cinder-scheduler-0\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") " pod="openstack/cinder-scheduler-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.212493 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-config-data\") pod \"cinder-scheduler-0\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") " pod="openstack/cinder-scheduler-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.213007 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") " pod="openstack/cinder-scheduler-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.275715 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-scripts\") pod \"cinder-api-0\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " pod="openstack/cinder-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.275787 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-ovsdbserver-sb\") pod 
\"dnsmasq-dns-6578955fd5-gcnr8\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") " pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.275811 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-dns-svc\") pod \"dnsmasq-dns-6578955fd5-gcnr8\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") " pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.275873 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-config\") pod \"dnsmasq-dns-6578955fd5-gcnr8\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") " pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.275927 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-gcnr8\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") " pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.275953 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snc64\" (UniqueName: \"kubernetes.io/projected/a6e77d16-341a-4f84-8427-82712eb6541f-kube-api-access-snc64\") pod \"dnsmasq-dns-6578955fd5-gcnr8\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") " pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.275990 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4cw6\" (UniqueName: \"kubernetes.io/projected/50626686-278e-47a5-a186-a84b4f9cc1a6-kube-api-access-d4cw6\") pod \"cinder-api-0\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " pod="openstack/cinder-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.276014 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " pod="openstack/cinder-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.276050 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-config-data-custom\") pod \"cinder-api-0\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " pod="openstack/cinder-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.276074 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-config-data\") pod \"cinder-api-0\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " pod="openstack/cinder-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.276094 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/50626686-278e-47a5-a186-a84b4f9cc1a6-etc-machine-id\") pod \"cinder-api-0\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " 
pod="openstack/cinder-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.276130 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50626686-278e-47a5-a186-a84b4f9cc1a6-logs\") pod \"cinder-api-0\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " pod="openstack/cinder-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.276175 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-gcnr8\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") " pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.276893 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-gcnr8\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") " pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.277105 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-gcnr8\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") " pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.277830 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-gcnr8\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") " pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.278344 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-config\") pod \"dnsmasq-dns-6578955fd5-gcnr8\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") " pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.280581 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-dns-svc\") pod \"dnsmasq-dns-6578955fd5-gcnr8\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") " pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.295015 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snc64\" (UniqueName: \"kubernetes.io/projected/a6e77d16-341a-4f84-8427-82712eb6541f-kube-api-access-snc64\") pod \"dnsmasq-dns-6578955fd5-gcnr8\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") " pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.304113 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.377585 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4cw6\" (UniqueName: \"kubernetes.io/projected/50626686-278e-47a5-a186-a84b4f9cc1a6-kube-api-access-d4cw6\") pod \"cinder-api-0\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " pod="openstack/cinder-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.377630 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " pod="openstack/cinder-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.377655 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-config-data-custom\") pod \"cinder-api-0\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " pod="openstack/cinder-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.377684 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-config-data\") pod \"cinder-api-0\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " pod="openstack/cinder-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.377708 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/50626686-278e-47a5-a186-a84b4f9cc1a6-etc-machine-id\") pod \"cinder-api-0\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " pod="openstack/cinder-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.377771 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50626686-278e-47a5-a186-a84b4f9cc1a6-logs\") pod \"cinder-api-0\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " pod="openstack/cinder-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.377814 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-scripts\") pod \"cinder-api-0\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " pod="openstack/cinder-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.379277 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/50626686-278e-47a5-a186-a84b4f9cc1a6-etc-machine-id\") pod \"cinder-api-0\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " pod="openstack/cinder-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.380157 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50626686-278e-47a5-a186-a84b4f9cc1a6-logs\") pod \"cinder-api-0\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " pod="openstack/cinder-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.388940 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-scripts\") pod \"cinder-api-0\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " pod="openstack/cinder-api-0" Nov 28 
13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.389790 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " pod="openstack/cinder-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.391836 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-config-data\") pod \"cinder-api-0\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " pod="openstack/cinder-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.395312 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-config-data-custom\") pod \"cinder-api-0\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " pod="openstack/cinder-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.399037 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4cw6\" (UniqueName: \"kubernetes.io/projected/50626686-278e-47a5-a186-a84b4f9cc1a6-kube-api-access-d4cw6\") pod \"cinder-api-0\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " pod="openstack/cinder-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.425490 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.694692 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.792418 4857 generic.go:334] "Generic (PLEG): container finished" podID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerID="e3860c9cd9dfa55680c98f69cece6eff0f08ced38d345f3573b02bd062397f7a" exitCode=0 Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.793850 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" event={"ID":"aba2e99a-c0de-4ae5-b347-de1565fd9d68","Type":"ContainerDied","Data":"e3860c9cd9dfa55680c98f69cece6eff0f08ced38d345f3573b02bd062397f7a"} Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.793925 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.793938 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 13:41:03 crc kubenswrapper[4857]: I1128 13:41:03.793953 4857 scope.go:117] "RemoveContainer" containerID="c7acb098908896eeec6673568d27f9b2d0362ab62a9a136da040ab452639a28c" Nov 28 13:41:04 crc kubenswrapper[4857]: I1128 13:41:04.255250 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-gcnr8"] Nov 28 13:41:04 crc kubenswrapper[4857]: I1128 13:41:04.391062 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 13:41:04 crc kubenswrapper[4857]: W1128 13:41:04.427828 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod607af733_25c0_43f4_8be8_328836908ca1.slice/crio-7601d9063bdf9e7b2ac1ac8c59b824b649c3ec3d61244763c07c03a4759dbb7a WatchSource:0}: Error finding container 
7601d9063bdf9e7b2ac1ac8c59b824b649c3ec3d61244763c07c03a4759dbb7a: Status 404 returned error can't find the container with id 7601d9063bdf9e7b2ac1ac8c59b824b649c3ec3d61244763c07c03a4759dbb7a Nov 28 13:41:04 crc kubenswrapper[4857]: I1128 13:41:04.430828 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 13:41:04 crc kubenswrapper[4857]: I1128 13:41:04.808368 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"50626686-278e-47a5-a186-a84b4f9cc1a6","Type":"ContainerStarted","Data":"acf22b6ccb921ce79e610833fe4e690415cc93bee92ce8b561bcd46f5c027e0e"} Nov 28 13:41:04 crc kubenswrapper[4857]: I1128 13:41:04.812595 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" event={"ID":"aba2e99a-c0de-4ae5-b347-de1565fd9d68","Type":"ContainerStarted","Data":"aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7"} Nov 28 13:41:04 crc kubenswrapper[4857]: I1128 13:41:04.820780 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" event={"ID":"0d2e145c-5068-4dff-a35e-14fe385cdcf2","Type":"ContainerStarted","Data":"bf2b4d3b1fd6b8f1241149d7a1019420f1580f221c1eb7ad343a94db93716eb4"} Nov 28 13:41:04 crc kubenswrapper[4857]: I1128 13:41:04.826577 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" podUID="e578310f-afa3-4d86-8115-98baf09ff7ef" containerName="dnsmasq-dns" containerID="cri-o://29ec0757e3778ff63e6b2cbc1f36a5dcbb0fecbf0f340ccbf2a8f41aaec3b24d" gracePeriod=10 Nov 28 13:41:04 crc kubenswrapper[4857]: I1128 13:41:04.826793 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" event={"ID":"e578310f-afa3-4d86-8115-98baf09ff7ef","Type":"ContainerStarted","Data":"29ec0757e3778ff63e6b2cbc1f36a5dcbb0fecbf0f340ccbf2a8f41aaec3b24d"} Nov 28 13:41:04 crc kubenswrapper[4857]: I1128 13:41:04.826970 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" Nov 28 13:41:04 crc kubenswrapper[4857]: I1128 13:41:04.829960 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"607af733-25c0-43f4-8be8-328836908ca1","Type":"ContainerStarted","Data":"7601d9063bdf9e7b2ac1ac8c59b824b649c3ec3d61244763c07c03a4759dbb7a"} Nov 28 13:41:04 crc kubenswrapper[4857]: I1128 13:41:04.834900 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" event={"ID":"a6e77d16-341a-4f84-8427-82712eb6541f","Type":"ContainerStarted","Data":"3d7c5a95cc17b61e5779079b5ed8f66cbc86adaac85d0b25a34560b3785518f8"} Nov 28 13:41:04 crc kubenswrapper[4857]: I1128 13:41:04.853402 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" podStartSLOduration=4.85338267 podStartE2EDuration="4.85338267s" podCreationTimestamp="2025-11-28 13:41:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:41:04.850779005 +0000 UTC m=+1356.878154182" watchObservedRunningTime="2025-11-28 13:41:04.85338267 +0000 UTC m=+1356.880757827" Nov 28 13:41:05 crc kubenswrapper[4857]: I1128 13:41:05.858496 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"50626686-278e-47a5-a186-a84b4f9cc1a6","Type":"ContainerStarted","Data":"db4167413097db7b63589b13fa9e51e3d4f7eb73aa7b4c5271ad7347debf54fa"} Nov 28 13:41:05 crc kubenswrapper[4857]: I1128 13:41:05.866620 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" event={"ID":"0d2e145c-5068-4dff-a35e-14fe385cdcf2","Type":"ContainerStarted","Data":"89d728886b576deecbaf0ff9d24f62808734b9aa348aaf482ac876e2835345d1"} Nov 28 13:41:05 crc kubenswrapper[4857]: I1128 13:41:05.869363 4857 generic.go:334] "Generic (PLEG): container finished" podID="e578310f-afa3-4d86-8115-98baf09ff7ef" containerID="29ec0757e3778ff63e6b2cbc1f36a5dcbb0fecbf0f340ccbf2a8f41aaec3b24d" exitCode=0 Nov 28 13:41:05 crc kubenswrapper[4857]: I1128 13:41:05.869460 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" event={"ID":"e578310f-afa3-4d86-8115-98baf09ff7ef","Type":"ContainerDied","Data":"29ec0757e3778ff63e6b2cbc1f36a5dcbb0fecbf0f340ccbf2a8f41aaec3b24d"} Nov 28 13:41:05 crc kubenswrapper[4857]: I1128 13:41:05.880636 4857 generic.go:334] "Generic (PLEG): container finished" podID="a6e77d16-341a-4f84-8427-82712eb6541f" containerID="407addd6ca088094b297b7f7f920655b1a8c2631adf913b9cc4c9d7a693417a5" exitCode=0 Nov 28 13:41:05 crc kubenswrapper[4857]: I1128 13:41:05.880835 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" event={"ID":"a6e77d16-341a-4f84-8427-82712eb6541f","Type":"ContainerDied","Data":"407addd6ca088094b297b7f7f920655b1a8c2631adf913b9cc4c9d7a693417a5"} Nov 28 13:41:05 crc kubenswrapper[4857]: I1128 13:41:05.884417 4857 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 13:41:05 crc kubenswrapper[4857]: I1128 13:41:05.884442 4857 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 13:41:05 crc kubenswrapper[4857]: I1128 13:41:05.885391 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-56664b65dc-mkdgh" event={"ID":"f411fba7-d7b2-4d97-9388-c1b6f57e8328","Type":"ContainerStarted","Data":"8b3ff8b7cb9bbbd5d33a06e4dc7773db3800416089a3b589b96e2930ebcb5b38"} Nov 28 13:41:05 crc kubenswrapper[4857]: I1128 13:41:05.910673 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" podStartSLOduration=3.866414625 podStartE2EDuration="5.91065037s" podCreationTimestamp="2025-11-28 13:41:00 +0000 UTC" firstStartedPulling="2025-11-28 13:41:01.759197468 +0000 UTC m=+1353.786572635" lastFinishedPulling="2025-11-28 13:41:03.803433213 +0000 UTC m=+1355.830808380" observedRunningTime="2025-11-28 13:41:05.901151525 +0000 UTC m=+1357.928526692" watchObservedRunningTime="2025-11-28 13:41:05.91065037 +0000 UTC m=+1357.938025537" Nov 28 13:41:05 crc kubenswrapper[4857]: I1128 13:41:05.995328 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.041332 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-ovsdbserver-nb\") pod \"e578310f-afa3-4d86-8115-98baf09ff7ef\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.041430 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-dns-svc\") pod \"e578310f-afa3-4d86-8115-98baf09ff7ef\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.041553 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-dns-swift-storage-0\") pod \"e578310f-afa3-4d86-8115-98baf09ff7ef\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.041574 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-ovsdbserver-sb\") pod \"e578310f-afa3-4d86-8115-98baf09ff7ef\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.041652 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zsgzm\" (UniqueName: \"kubernetes.io/projected/e578310f-afa3-4d86-8115-98baf09ff7ef-kube-api-access-zsgzm\") pod \"e578310f-afa3-4d86-8115-98baf09ff7ef\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.041680 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-config\") pod \"e578310f-afa3-4d86-8115-98baf09ff7ef\" (UID: \"e578310f-afa3-4d86-8115-98baf09ff7ef\") " Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.050898 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e578310f-afa3-4d86-8115-98baf09ff7ef-kube-api-access-zsgzm" (OuterVolumeSpecName: "kube-api-access-zsgzm") pod "e578310f-afa3-4d86-8115-98baf09ff7ef" (UID: "e578310f-afa3-4d86-8115-98baf09ff7ef"). InnerVolumeSpecName "kube-api-access-zsgzm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.098473 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e578310f-afa3-4d86-8115-98baf09ff7ef" (UID: "e578310f-afa3-4d86-8115-98baf09ff7ef"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.128161 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e578310f-afa3-4d86-8115-98baf09ff7ef" (UID: "e578310f-afa3-4d86-8115-98baf09ff7ef"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.138077 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e578310f-afa3-4d86-8115-98baf09ff7ef" (UID: "e578310f-afa3-4d86-8115-98baf09ff7ef"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.148682 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zsgzm\" (UniqueName: \"kubernetes.io/projected/e578310f-afa3-4d86-8115-98baf09ff7ef-kube-api-access-zsgzm\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.148710 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.148718 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.148727 4857 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.183518 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e578310f-afa3-4d86-8115-98baf09ff7ef" (UID: "e578310f-afa3-4d86-8115-98baf09ff7ef"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.183851 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-config" (OuterVolumeSpecName: "config") pod "e578310f-afa3-4d86-8115-98baf09ff7ef" (UID: "e578310f-afa3-4d86-8115-98baf09ff7ef"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.250047 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.250326 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e578310f-afa3-4d86-8115-98baf09ff7ef-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.765769 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.804927 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.924524 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"607af733-25c0-43f4-8be8-328836908ca1","Type":"ContainerStarted","Data":"6651476b035972f0b29c30fd721cd5e597e49f641f8da373c883918d2676e924"} Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.941201 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" event={"ID":"a6e77d16-341a-4f84-8427-82712eb6541f","Type":"ContainerStarted","Data":"aee9ed79f9bfca814a082362b7a417713d91125d8fa079c602bc4e8a91039739"} Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.942561 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.978857 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-56664b65dc-mkdgh" event={"ID":"f411fba7-d7b2-4d97-9388-c1b6f57e8328","Type":"ContainerStarted","Data":"df7bca3ad7fcc3cc2ac1df9b77614d94b72b4db2a23294091359d1e948b3577e"} Nov 28 13:41:06 crc kubenswrapper[4857]: I1128 13:41:06.998732 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" podStartSLOduration=3.998710811 podStartE2EDuration="3.998710811s" podCreationTimestamp="2025-11-28 13:41:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:41:06.97343094 +0000 UTC m=+1359.000806107" watchObservedRunningTime="2025-11-28 13:41:06.998710811 +0000 UTC m=+1359.026085988" Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.002286 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"50626686-278e-47a5-a186-a84b4f9cc1a6","Type":"ContainerStarted","Data":"7c551d4670cb8d16f068cb03d9ce0ea30362fb5d540a5ad97095e335ba84a1e7"} Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.002334 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.026495 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-56664b65dc-mkdgh" podStartSLOduration=5.037675845 podStartE2EDuration="7.020138442s" podCreationTimestamp="2025-11-28 13:41:00 +0000 UTC" firstStartedPulling="2025-11-28 13:41:01.756022286 +0000 UTC m=+1353.783397453" lastFinishedPulling="2025-11-28 13:41:03.738484883 +0000 UTC m=+1355.765860050" 
observedRunningTime="2025-11-28 13:41:07.011837971 +0000 UTC m=+1359.039213138" watchObservedRunningTime="2025-11-28 13:41:07.020138442 +0000 UTC m=+1359.047513649" Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.043293 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.044056 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-vlrhh" event={"ID":"e578310f-afa3-4d86-8115-98baf09ff7ef","Type":"ContainerDied","Data":"c7741781090b9971cde3e32cbb2b5b70815882262a85effbfb38bc53be473590"} Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.044100 4857 scope.go:117] "RemoveContainer" containerID="29ec0757e3778ff63e6b2cbc1f36a5dcbb0fecbf0f340ccbf2a8f41aaec3b24d" Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.059858 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.059835221 podStartE2EDuration="4.059835221s" podCreationTimestamp="2025-11-28 13:41:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:41:07.042468378 +0000 UTC m=+1359.069843565" watchObservedRunningTime="2025-11-28 13:41:07.059835221 +0000 UTC m=+1359.087210388" Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.106139 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-vlrhh"] Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.116038 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-vlrhh"] Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.125544 4857 scope.go:117] "RemoveContainer" containerID="86d25a119789f5851728048126e00c909bedd7599605d6b8a559c1c2c8a73470" Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.437035 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.485906 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-config-data\") pod \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.485943 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-scripts\") pod \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.485987 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279bt\" (UniqueName: \"kubernetes.io/projected/dd1c47cb-0f7e-42f1-824b-a6cef692c751-kube-api-access-279bt\") pod \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.486131 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-sg-core-conf-yaml\") pod \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.486152 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd1c47cb-0f7e-42f1-824b-a6cef692c751-run-httpd\") pod \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.486194 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd1c47cb-0f7e-42f1-824b-a6cef692c751-log-httpd\") pod \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.486251 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-combined-ca-bundle\") pod \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\" (UID: \"dd1c47cb-0f7e-42f1-824b-a6cef692c751\") " Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.521933 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd1c47cb-0f7e-42f1-824b-a6cef692c751-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "dd1c47cb-0f7e-42f1-824b-a6cef692c751" (UID: "dd1c47cb-0f7e-42f1-824b-a6cef692c751"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.529970 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd1c47cb-0f7e-42f1-824b-a6cef692c751-kube-api-access-279bt" (OuterVolumeSpecName: "kube-api-access-279bt") pod "dd1c47cb-0f7e-42f1-824b-a6cef692c751" (UID: "dd1c47cb-0f7e-42f1-824b-a6cef692c751"). InnerVolumeSpecName "kube-api-access-279bt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.531779 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-scripts" (OuterVolumeSpecName: "scripts") pod "dd1c47cb-0f7e-42f1-824b-a6cef692c751" (UID: "dd1c47cb-0f7e-42f1-824b-a6cef692c751"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.543239 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd1c47cb-0f7e-42f1-824b-a6cef692c751-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "dd1c47cb-0f7e-42f1-824b-a6cef692c751" (UID: "dd1c47cb-0f7e-42f1-824b-a6cef692c751"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.588188 4857 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd1c47cb-0f7e-42f1-824b-a6cef692c751-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.588220 4857 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd1c47cb-0f7e-42f1-824b-a6cef692c751-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.588230 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.588239 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279bt\" (UniqueName: \"kubernetes.io/projected/dd1c47cb-0f7e-42f1-824b-a6cef692c751-kube-api-access-279bt\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.625038 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dd1c47cb-0f7e-42f1-824b-a6cef692c751" (UID: "dd1c47cb-0f7e-42f1-824b-a6cef692c751"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.625054 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "dd1c47cb-0f7e-42f1-824b-a6cef692c751" (UID: "dd1c47cb-0f7e-42f1-824b-a6cef692c751"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.684922 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-config-data" (OuterVolumeSpecName: "config-data") pod "dd1c47cb-0f7e-42f1-824b-a6cef692c751" (UID: "dd1c47cb-0f7e-42f1-824b-a6cef692c751"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.697560 4857 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.697593 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.697604 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd1c47cb-0f7e-42f1-824b-a6cef692c751-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:07 crc kubenswrapper[4857]: I1128 13:41:07.717079 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.055338 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"607af733-25c0-43f4-8be8-328836908ca1","Type":"ContainerStarted","Data":"04a1f1011ba9934a5850d4fcba66f3715df0a99289167cdc261fc44efebc3082"} Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.058524 4857 generic.go:334] "Generic (PLEG): container finished" podID="dd1c47cb-0f7e-42f1-824b-a6cef692c751" containerID="67d7fc4f5095e379062283d9a025c1c36226cc34ff139a5a7da965bb1e6c4b4a" exitCode=0 Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.058613 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.058684 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dd1c47cb-0f7e-42f1-824b-a6cef692c751","Type":"ContainerDied","Data":"67d7fc4f5095e379062283d9a025c1c36226cc34ff139a5a7da965bb1e6c4b4a"} Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.058726 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dd1c47cb-0f7e-42f1-824b-a6cef692c751","Type":"ContainerDied","Data":"e704e27bb3e276011833d42591807b7d40f9312c9b8dfc5c46853c9021dcf99b"} Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.058825 4857 scope.go:117] "RemoveContainer" containerID="994efca9eff08e2894a93e0654ccdda0445a5967b2b3b92b4ffb7d9c1232d363" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.093528 4857 scope.go:117] "RemoveContainer" containerID="45ab71fd172102a32778d57deb04ace9af8696a6011cee74dcc94ab1721f814a" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.121108 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.850264445 podStartE2EDuration="6.121086355s" podCreationTimestamp="2025-11-28 13:41:02 +0000 UTC" firstStartedPulling="2025-11-28 13:41:04.431454609 +0000 UTC m=+1356.458829776" lastFinishedPulling="2025-11-28 13:41:05.702276529 +0000 UTC m=+1357.729651686" observedRunningTime="2025-11-28 13:41:08.093667291 +0000 UTC m=+1360.121042468" watchObservedRunningTime="2025-11-28 13:41:08.121086355 +0000 UTC m=+1360.148461522" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.143158 4857 scope.go:117] "RemoveContainer" containerID="67d7fc4f5095e379062283d9a025c1c36226cc34ff139a5a7da965bb1e6c4b4a" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.192528 4857 
scope.go:117] "RemoveContainer" containerID="d64f59fc0d63d8fdddaf62677a40dab269073c3700fca581921792c2837fe785" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.192681 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.205803 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.212901 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:41:08 crc kubenswrapper[4857]: E1128 13:41:08.213289 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd1c47cb-0f7e-42f1-824b-a6cef692c751" containerName="proxy-httpd" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.213307 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd1c47cb-0f7e-42f1-824b-a6cef692c751" containerName="proxy-httpd" Nov 28 13:41:08 crc kubenswrapper[4857]: E1128 13:41:08.213323 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd1c47cb-0f7e-42f1-824b-a6cef692c751" containerName="ceilometer-notification-agent" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.213328 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd1c47cb-0f7e-42f1-824b-a6cef692c751" containerName="ceilometer-notification-agent" Nov 28 13:41:08 crc kubenswrapper[4857]: E1128 13:41:08.213337 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd1c47cb-0f7e-42f1-824b-a6cef692c751" containerName="ceilometer-central-agent" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.213343 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd1c47cb-0f7e-42f1-824b-a6cef692c751" containerName="ceilometer-central-agent" Nov 28 13:41:08 crc kubenswrapper[4857]: E1128 13:41:08.213353 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e578310f-afa3-4d86-8115-98baf09ff7ef" containerName="dnsmasq-dns" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.213359 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e578310f-afa3-4d86-8115-98baf09ff7ef" containerName="dnsmasq-dns" Nov 28 13:41:08 crc kubenswrapper[4857]: E1128 13:41:08.213370 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e578310f-afa3-4d86-8115-98baf09ff7ef" containerName="init" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.213376 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e578310f-afa3-4d86-8115-98baf09ff7ef" containerName="init" Nov 28 13:41:08 crc kubenswrapper[4857]: E1128 13:41:08.213391 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd1c47cb-0f7e-42f1-824b-a6cef692c751" containerName="sg-core" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.213397 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd1c47cb-0f7e-42f1-824b-a6cef692c751" containerName="sg-core" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.213641 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd1c47cb-0f7e-42f1-824b-a6cef692c751" containerName="sg-core" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.213655 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e578310f-afa3-4d86-8115-98baf09ff7ef" containerName="dnsmasq-dns" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.213662 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd1c47cb-0f7e-42f1-824b-a6cef692c751" containerName="ceilometer-central-agent" Nov 28 13:41:08 crc 
kubenswrapper[4857]: I1128 13:41:08.213671 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd1c47cb-0f7e-42f1-824b-a6cef692c751" containerName="proxy-httpd" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.213692 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd1c47cb-0f7e-42f1-824b-a6cef692c751" containerName="ceilometer-notification-agent" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.225016 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.225496 4857 scope.go:117] "RemoveContainer" containerID="994efca9eff08e2894a93e0654ccdda0445a5967b2b3b92b4ffb7d9c1232d363" Nov 28 13:41:08 crc kubenswrapper[4857]: E1128 13:41:08.227860 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"994efca9eff08e2894a93e0654ccdda0445a5967b2b3b92b4ffb7d9c1232d363\": container with ID starting with 994efca9eff08e2894a93e0654ccdda0445a5967b2b3b92b4ffb7d9c1232d363 not found: ID does not exist" containerID="994efca9eff08e2894a93e0654ccdda0445a5967b2b3b92b4ffb7d9c1232d363" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.227894 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"994efca9eff08e2894a93e0654ccdda0445a5967b2b3b92b4ffb7d9c1232d363"} err="failed to get container status \"994efca9eff08e2894a93e0654ccdda0445a5967b2b3b92b4ffb7d9c1232d363\": rpc error: code = NotFound desc = could not find container \"994efca9eff08e2894a93e0654ccdda0445a5967b2b3b92b4ffb7d9c1232d363\": container with ID starting with 994efca9eff08e2894a93e0654ccdda0445a5967b2b3b92b4ffb7d9c1232d363 not found: ID does not exist" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.227917 4857 scope.go:117] "RemoveContainer" containerID="45ab71fd172102a32778d57deb04ace9af8696a6011cee74dcc94ab1721f814a" Nov 28 13:41:08 crc kubenswrapper[4857]: E1128 13:41:08.228347 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45ab71fd172102a32778d57deb04ace9af8696a6011cee74dcc94ab1721f814a\": container with ID starting with 45ab71fd172102a32778d57deb04ace9af8696a6011cee74dcc94ab1721f814a not found: ID does not exist" containerID="45ab71fd172102a32778d57deb04ace9af8696a6011cee74dcc94ab1721f814a" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.228372 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45ab71fd172102a32778d57deb04ace9af8696a6011cee74dcc94ab1721f814a"} err="failed to get container status \"45ab71fd172102a32778d57deb04ace9af8696a6011cee74dcc94ab1721f814a\": rpc error: code = NotFound desc = could not find container \"45ab71fd172102a32778d57deb04ace9af8696a6011cee74dcc94ab1721f814a\": container with ID starting with 45ab71fd172102a32778d57deb04ace9af8696a6011cee74dcc94ab1721f814a not found: ID does not exist" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.228389 4857 scope.go:117] "RemoveContainer" containerID="67d7fc4f5095e379062283d9a025c1c36226cc34ff139a5a7da965bb1e6c4b4a" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.228410 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.228523 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 
13:41:08 crc kubenswrapper[4857]: E1128 13:41:08.228655 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67d7fc4f5095e379062283d9a025c1c36226cc34ff139a5a7da965bb1e6c4b4a\": container with ID starting with 67d7fc4f5095e379062283d9a025c1c36226cc34ff139a5a7da965bb1e6c4b4a not found: ID does not exist" containerID="67d7fc4f5095e379062283d9a025c1c36226cc34ff139a5a7da965bb1e6c4b4a" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.228681 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67d7fc4f5095e379062283d9a025c1c36226cc34ff139a5a7da965bb1e6c4b4a"} err="failed to get container status \"67d7fc4f5095e379062283d9a025c1c36226cc34ff139a5a7da965bb1e6c4b4a\": rpc error: code = NotFound desc = could not find container \"67d7fc4f5095e379062283d9a025c1c36226cc34ff139a5a7da965bb1e6c4b4a\": container with ID starting with 67d7fc4f5095e379062283d9a025c1c36226cc34ff139a5a7da965bb1e6c4b4a not found: ID does not exist" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.228703 4857 scope.go:117] "RemoveContainer" containerID="d64f59fc0d63d8fdddaf62677a40dab269073c3700fca581921792c2837fe785" Nov 28 13:41:08 crc kubenswrapper[4857]: E1128 13:41:08.230480 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d64f59fc0d63d8fdddaf62677a40dab269073c3700fca581921792c2837fe785\": container with ID starting with d64f59fc0d63d8fdddaf62677a40dab269073c3700fca581921792c2837fe785 not found: ID does not exist" containerID="d64f59fc0d63d8fdddaf62677a40dab269073c3700fca581921792c2837fe785" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.230518 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d64f59fc0d63d8fdddaf62677a40dab269073c3700fca581921792c2837fe785"} err="failed to get container status \"d64f59fc0d63d8fdddaf62677a40dab269073c3700fca581921792c2837fe785\": rpc error: code = NotFound desc = could not find container \"d64f59fc0d63d8fdddaf62677a40dab269073c3700fca581921792c2837fe785\": container with ID starting with d64f59fc0d63d8fdddaf62677a40dab269073c3700fca581921792c2837fe785 not found: ID does not exist" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.231841 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.266920 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-575548d9c6-4zx6z"] Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.268806 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.273190 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.273317 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.279011 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-575548d9c6-4zx6z"] Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.306836 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.310990 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-config-data-custom\") pod \"barbican-api-575548d9c6-4zx6z\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.311047 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-config-data\") pod \"ceilometer-0\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " pod="openstack/ceilometer-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.311078 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " pod="openstack/ceilometer-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.311104 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-log-httpd\") pod \"ceilometer-0\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " pod="openstack/ceilometer-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.311133 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-public-tls-certs\") pod \"barbican-api-575548d9c6-4zx6z\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.311191 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-combined-ca-bundle\") pod \"barbican-api-575548d9c6-4zx6z\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.311210 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-scripts\") pod \"ceilometer-0\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " pod="openstack/ceilometer-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.311230 4857 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-run-httpd\") pod \"ceilometer-0\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " pod="openstack/ceilometer-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.311249 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " pod="openstack/ceilometer-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.311262 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d0c82d5-b320-444c-a4d9-838ca3097157-logs\") pod \"barbican-api-575548d9c6-4zx6z\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.311282 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-config-data\") pod \"barbican-api-575548d9c6-4zx6z\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.311296 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qjn8\" (UniqueName: \"kubernetes.io/projected/0d0c82d5-b320-444c-a4d9-838ca3097157-kube-api-access-5qjn8\") pod \"barbican-api-575548d9c6-4zx6z\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.311314 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-internal-tls-certs\") pod \"barbican-api-575548d9c6-4zx6z\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.311356 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jfsr\" (UniqueName: \"kubernetes.io/projected/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-kube-api-access-4jfsr\") pod \"ceilometer-0\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " pod="openstack/ceilometer-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.322288 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd1c47cb-0f7e-42f1-824b-a6cef692c751" path="/var/lib/kubelet/pods/dd1c47cb-0f7e-42f1-824b-a6cef692c751/volumes" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.323183 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e578310f-afa3-4d86-8115-98baf09ff7ef" path="/var/lib/kubelet/pods/e578310f-afa3-4d86-8115-98baf09ff7ef/volumes" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.412940 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-run-httpd\") pod \"ceilometer-0\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " pod="openstack/ceilometer-0" Nov 28 
13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.413006 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " pod="openstack/ceilometer-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.413029 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d0c82d5-b320-444c-a4d9-838ca3097157-logs\") pod \"barbican-api-575548d9c6-4zx6z\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.413058 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-config-data\") pod \"barbican-api-575548d9c6-4zx6z\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.413078 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qjn8\" (UniqueName: \"kubernetes.io/projected/0d0c82d5-b320-444c-a4d9-838ca3097157-kube-api-access-5qjn8\") pod \"barbican-api-575548d9c6-4zx6z\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.413102 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-internal-tls-certs\") pod \"barbican-api-575548d9c6-4zx6z\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.413187 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jfsr\" (UniqueName: \"kubernetes.io/projected/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-kube-api-access-4jfsr\") pod \"ceilometer-0\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " pod="openstack/ceilometer-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.413230 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-config-data-custom\") pod \"barbican-api-575548d9c6-4zx6z\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.413291 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-config-data\") pod \"ceilometer-0\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " pod="openstack/ceilometer-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.413334 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " pod="openstack/ceilometer-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.413359 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-log-httpd\") pod \"ceilometer-0\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " pod="openstack/ceilometer-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.413388 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-public-tls-certs\") pod \"barbican-api-575548d9c6-4zx6z\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.413471 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-combined-ca-bundle\") pod \"barbican-api-575548d9c6-4zx6z\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.413491 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-scripts\") pod \"ceilometer-0\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " pod="openstack/ceilometer-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.415312 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-run-httpd\") pod \"ceilometer-0\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " pod="openstack/ceilometer-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.415853 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-log-httpd\") pod \"ceilometer-0\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " pod="openstack/ceilometer-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.416433 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d0c82d5-b320-444c-a4d9-838ca3097157-logs\") pod \"barbican-api-575548d9c6-4zx6z\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.418914 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " pod="openstack/ceilometer-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.419327 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-config-data\") pod \"barbican-api-575548d9c6-4zx6z\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.420262 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-scripts\") pod \"ceilometer-0\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " pod="openstack/ceilometer-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.435849 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-config-data-custom\") pod \"barbican-api-575548d9c6-4zx6z\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.436398 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " pod="openstack/ceilometer-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.437066 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-config-data\") pod \"ceilometer-0\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " pod="openstack/ceilometer-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.440291 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-internal-tls-certs\") pod \"barbican-api-575548d9c6-4zx6z\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.440876 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-public-tls-certs\") pod \"barbican-api-575548d9c6-4zx6z\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.441298 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-combined-ca-bundle\") pod \"barbican-api-575548d9c6-4zx6z\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.445993 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qjn8\" (UniqueName: \"kubernetes.io/projected/0d0c82d5-b320-444c-a4d9-838ca3097157-kube-api-access-5qjn8\") pod \"barbican-api-575548d9c6-4zx6z\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.450432 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jfsr\" (UniqueName: \"kubernetes.io/projected/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-kube-api-access-4jfsr\") pod \"ceilometer-0\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " pod="openstack/ceilometer-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.554271 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:41:08 crc kubenswrapper[4857]: I1128 13:41:08.605341 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.069142 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="50626686-278e-47a5-a186-a84b4f9cc1a6" containerName="cinder-api-log" containerID="cri-o://db4167413097db7b63589b13fa9e51e3d4f7eb73aa7b4c5271ad7347debf54fa" gracePeriod=30 Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.069448 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="50626686-278e-47a5-a186-a84b4f9cc1a6" containerName="cinder-api" containerID="cri-o://7c551d4670cb8d16f068cb03d9ce0ea30362fb5d540a5ad97095e335ba84a1e7" gracePeriod=30 Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.109134 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:41:09 crc kubenswrapper[4857]: W1128 13:41:09.112296 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod82de4c9c_25af_49e1_9579_f51ad9a3d4ec.slice/crio-4d9a08ff646eb5bd2226dddbd4509c2e93604521bae5a149cbfc47949d76d6c6 WatchSource:0}: Error finding container 4d9a08ff646eb5bd2226dddbd4509c2e93604521bae5a149cbfc47949d76d6c6: Status 404 returned error can't find the container with id 4d9a08ff646eb5bd2226dddbd4509c2e93604521bae5a149cbfc47949d76d6c6 Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.250255 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-575548d9c6-4zx6z"] Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.584619 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.649457 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50626686-278e-47a5-a186-a84b4f9cc1a6-logs\") pod \"50626686-278e-47a5-a186-a84b4f9cc1a6\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.649523 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-scripts\") pod \"50626686-278e-47a5-a186-a84b4f9cc1a6\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.649539 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-config-data\") pod \"50626686-278e-47a5-a186-a84b4f9cc1a6\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.649582 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/50626686-278e-47a5-a186-a84b4f9cc1a6-etc-machine-id\") pod \"50626686-278e-47a5-a186-a84b4f9cc1a6\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.649641 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-config-data-custom\") pod \"50626686-278e-47a5-a186-a84b4f9cc1a6\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " Nov 28 13:41:09 crc 
kubenswrapper[4857]: I1128 13:41:09.649708 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-combined-ca-bundle\") pod \"50626686-278e-47a5-a186-a84b4f9cc1a6\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.649838 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4cw6\" (UniqueName: \"kubernetes.io/projected/50626686-278e-47a5-a186-a84b4f9cc1a6-kube-api-access-d4cw6\") pod \"50626686-278e-47a5-a186-a84b4f9cc1a6\" (UID: \"50626686-278e-47a5-a186-a84b4f9cc1a6\") " Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.649966 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/50626686-278e-47a5-a186-a84b4f9cc1a6-logs" (OuterVolumeSpecName: "logs") pod "50626686-278e-47a5-a186-a84b4f9cc1a6" (UID: "50626686-278e-47a5-a186-a84b4f9cc1a6"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.650007 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/50626686-278e-47a5-a186-a84b4f9cc1a6-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "50626686-278e-47a5-a186-a84b4f9cc1a6" (UID: "50626686-278e-47a5-a186-a84b4f9cc1a6"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.650225 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50626686-278e-47a5-a186-a84b4f9cc1a6-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.650236 4857 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/50626686-278e-47a5-a186-a84b4f9cc1a6-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.654253 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-scripts" (OuterVolumeSpecName: "scripts") pod "50626686-278e-47a5-a186-a84b4f9cc1a6" (UID: "50626686-278e-47a5-a186-a84b4f9cc1a6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.661027 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "50626686-278e-47a5-a186-a84b4f9cc1a6" (UID: "50626686-278e-47a5-a186-a84b4f9cc1a6"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.663426 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50626686-278e-47a5-a186-a84b4f9cc1a6-kube-api-access-d4cw6" (OuterVolumeSpecName: "kube-api-access-d4cw6") pod "50626686-278e-47a5-a186-a84b4f9cc1a6" (UID: "50626686-278e-47a5-a186-a84b4f9cc1a6"). InnerVolumeSpecName "kube-api-access-d4cw6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.709661 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "50626686-278e-47a5-a186-a84b4f9cc1a6" (UID: "50626686-278e-47a5-a186-a84b4f9cc1a6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.742151 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-config-data" (OuterVolumeSpecName: "config-data") pod "50626686-278e-47a5-a186-a84b4f9cc1a6" (UID: "50626686-278e-47a5-a186-a84b4f9cc1a6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.751608 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.751647 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.751661 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.751677 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50626686-278e-47a5-a186-a84b4f9cc1a6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:09 crc kubenswrapper[4857]: I1128 13:41:09.751690 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4cw6\" (UniqueName: \"kubernetes.io/projected/50626686-278e-47a5-a186-a84b4f9cc1a6-kube-api-access-d4cw6\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.078150 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82de4c9c-25af-49e1-9579-f51ad9a3d4ec","Type":"ContainerStarted","Data":"efb20f4a4e327f203008825bf6990f8eb7c028b35f176a97f5ec6f6de2f09df8"} Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.078463 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82de4c9c-25af-49e1-9579-f51ad9a3d4ec","Type":"ContainerStarted","Data":"4d9a08ff646eb5bd2226dddbd4509c2e93604521bae5a149cbfc47949d76d6c6"} Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.080666 4857 generic.go:334] "Generic (PLEG): container finished" podID="50626686-278e-47a5-a186-a84b4f9cc1a6" containerID="7c551d4670cb8d16f068cb03d9ce0ea30362fb5d540a5ad97095e335ba84a1e7" exitCode=0 Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.080691 4857 generic.go:334] "Generic (PLEG): container finished" podID="50626686-278e-47a5-a186-a84b4f9cc1a6" containerID="db4167413097db7b63589b13fa9e51e3d4f7eb73aa7b4c5271ad7347debf54fa" exitCode=143 Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.080706 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"50626686-278e-47a5-a186-a84b4f9cc1a6","Type":"ContainerDied","Data":"7c551d4670cb8d16f068cb03d9ce0ea30362fb5d540a5ad97095e335ba84a1e7"} Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.080775 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.080797 4857 scope.go:117] "RemoveContainer" containerID="7c551d4670cb8d16f068cb03d9ce0ea30362fb5d540a5ad97095e335ba84a1e7" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.080783 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"50626686-278e-47a5-a186-a84b4f9cc1a6","Type":"ContainerDied","Data":"db4167413097db7b63589b13fa9e51e3d4f7eb73aa7b4c5271ad7347debf54fa"} Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.080957 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"50626686-278e-47a5-a186-a84b4f9cc1a6","Type":"ContainerDied","Data":"acf22b6ccb921ce79e610833fe4e690415cc93bee92ce8b561bcd46f5c027e0e"} Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.083793 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-575548d9c6-4zx6z" event={"ID":"0d0c82d5-b320-444c-a4d9-838ca3097157","Type":"ContainerStarted","Data":"6307f97c800ac6b026e60dbaa702231a4783caba353d39fc8956c6ce72d5e01e"} Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.083851 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-575548d9c6-4zx6z" event={"ID":"0d0c82d5-b320-444c-a4d9-838ca3097157","Type":"ContainerStarted","Data":"d880cc69cc93c55dd123da2ed1ba8cf195b6e491b2fba33f24d18a403279c8c6"} Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.083865 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-575548d9c6-4zx6z" event={"ID":"0d0c82d5-b320-444c-a4d9-838ca3097157","Type":"ContainerStarted","Data":"578ec44bae4cc47b2a5a3b427aa426a702302e488a3c16e25719531dba92e1ee"} Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.084351 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.084379 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.106458 4857 scope.go:117] "RemoveContainer" containerID="db4167413097db7b63589b13fa9e51e3d4f7eb73aa7b4c5271ad7347debf54fa" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.110485 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-575548d9c6-4zx6z" podStartSLOduration=2.110466493 podStartE2EDuration="2.110466493s" podCreationTimestamp="2025-11-28 13:41:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:41:10.10618924 +0000 UTC m=+1362.133564417" watchObservedRunningTime="2025-11-28 13:41:10.110466493 +0000 UTC m=+1362.137841660" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.142606 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.165888 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.166906 4857 scope.go:117] "RemoveContainer" 
containerID="7c551d4670cb8d16f068cb03d9ce0ea30362fb5d540a5ad97095e335ba84a1e7" Nov 28 13:41:10 crc kubenswrapper[4857]: E1128 13:41:10.169665 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c551d4670cb8d16f068cb03d9ce0ea30362fb5d540a5ad97095e335ba84a1e7\": container with ID starting with 7c551d4670cb8d16f068cb03d9ce0ea30362fb5d540a5ad97095e335ba84a1e7 not found: ID does not exist" containerID="7c551d4670cb8d16f068cb03d9ce0ea30362fb5d540a5ad97095e335ba84a1e7" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.169730 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c551d4670cb8d16f068cb03d9ce0ea30362fb5d540a5ad97095e335ba84a1e7"} err="failed to get container status \"7c551d4670cb8d16f068cb03d9ce0ea30362fb5d540a5ad97095e335ba84a1e7\": rpc error: code = NotFound desc = could not find container \"7c551d4670cb8d16f068cb03d9ce0ea30362fb5d540a5ad97095e335ba84a1e7\": container with ID starting with 7c551d4670cb8d16f068cb03d9ce0ea30362fb5d540a5ad97095e335ba84a1e7 not found: ID does not exist" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.169876 4857 scope.go:117] "RemoveContainer" containerID="db4167413097db7b63589b13fa9e51e3d4f7eb73aa7b4c5271ad7347debf54fa" Nov 28 13:41:10 crc kubenswrapper[4857]: E1128 13:41:10.171430 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db4167413097db7b63589b13fa9e51e3d4f7eb73aa7b4c5271ad7347debf54fa\": container with ID starting with db4167413097db7b63589b13fa9e51e3d4f7eb73aa7b4c5271ad7347debf54fa not found: ID does not exist" containerID="db4167413097db7b63589b13fa9e51e3d4f7eb73aa7b4c5271ad7347debf54fa" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.171576 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db4167413097db7b63589b13fa9e51e3d4f7eb73aa7b4c5271ad7347debf54fa"} err="failed to get container status \"db4167413097db7b63589b13fa9e51e3d4f7eb73aa7b4c5271ad7347debf54fa\": rpc error: code = NotFound desc = could not find container \"db4167413097db7b63589b13fa9e51e3d4f7eb73aa7b4c5271ad7347debf54fa\": container with ID starting with db4167413097db7b63589b13fa9e51e3d4f7eb73aa7b4c5271ad7347debf54fa not found: ID does not exist" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.171605 4857 scope.go:117] "RemoveContainer" containerID="7c551d4670cb8d16f068cb03d9ce0ea30362fb5d540a5ad97095e335ba84a1e7" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.173243 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c551d4670cb8d16f068cb03d9ce0ea30362fb5d540a5ad97095e335ba84a1e7"} err="failed to get container status \"7c551d4670cb8d16f068cb03d9ce0ea30362fb5d540a5ad97095e335ba84a1e7\": rpc error: code = NotFound desc = could not find container \"7c551d4670cb8d16f068cb03d9ce0ea30362fb5d540a5ad97095e335ba84a1e7\": container with ID starting with 7c551d4670cb8d16f068cb03d9ce0ea30362fb5d540a5ad97095e335ba84a1e7 not found: ID does not exist" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.173302 4857 scope.go:117] "RemoveContainer" containerID="db4167413097db7b63589b13fa9e51e3d4f7eb73aa7b4c5271ad7347debf54fa" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.173668 4857 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"db4167413097db7b63589b13fa9e51e3d4f7eb73aa7b4c5271ad7347debf54fa"} err="failed to get container status \"db4167413097db7b63589b13fa9e51e3d4f7eb73aa7b4c5271ad7347debf54fa\": rpc error: code = NotFound desc = could not find container \"db4167413097db7b63589b13fa9e51e3d4f7eb73aa7b4c5271ad7347debf54fa\": container with ID starting with db4167413097db7b63589b13fa9e51e3d4f7eb73aa7b4c5271ad7347debf54fa not found: ID does not exist" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.177686 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 28 13:41:10 crc kubenswrapper[4857]: E1128 13:41:10.178336 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50626686-278e-47a5-a186-a84b4f9cc1a6" containerName="cinder-api" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.178572 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="50626686-278e-47a5-a186-a84b4f9cc1a6" containerName="cinder-api" Nov 28 13:41:10 crc kubenswrapper[4857]: E1128 13:41:10.178618 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50626686-278e-47a5-a186-a84b4f9cc1a6" containerName="cinder-api-log" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.178631 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="50626686-278e-47a5-a186-a84b4f9cc1a6" containerName="cinder-api-log" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.178965 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="50626686-278e-47a5-a186-a84b4f9cc1a6" containerName="cinder-api" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.179015 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="50626686-278e-47a5-a186-a84b4f9cc1a6" containerName="cinder-api-log" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.180367 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.182665 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.182974 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.185413 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.188564 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.344996 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50626686-278e-47a5-a186-a84b4f9cc1a6" path="/var/lib/kubelet/pods/50626686-278e-47a5-a186-a84b4f9cc1a6/volumes" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.367355 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.367425 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.367468 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-public-tls-certs\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.367497 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-config-data\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.367513 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-scripts\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.367541 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2w6q5\" (UniqueName: \"kubernetes.io/projected/4477c075-9151-49cc-bb52-82dc34ea46ec-kube-api-access-2w6q5\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.367567 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4477c075-9151-49cc-bb52-82dc34ea46ec-logs\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " 
pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.367604 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-config-data-custom\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.367650 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4477c075-9151-49cc-bb52-82dc34ea46ec-etc-machine-id\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.468484 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.468536 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.468568 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-public-tls-certs\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.468597 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-config-data\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.468619 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-scripts\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.468646 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2w6q5\" (UniqueName: \"kubernetes.io/projected/4477c075-9151-49cc-bb52-82dc34ea46ec-kube-api-access-2w6q5\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.468683 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4477c075-9151-49cc-bb52-82dc34ea46ec-logs\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.468721 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-config-data-custom\") pod \"cinder-api-0\" (UID: 
\"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.468778 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4477c075-9151-49cc-bb52-82dc34ea46ec-etc-machine-id\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.468864 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4477c075-9151-49cc-bb52-82dc34ea46ec-etc-machine-id\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.472485 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4477c075-9151-49cc-bb52-82dc34ea46ec-logs\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.473000 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.473025 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.473488 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-config-data\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.473518 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-public-tls-certs\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.475410 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-scripts\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.477601 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-config-data-custom\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.497532 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2w6q5\" (UniqueName: \"kubernetes.io/projected/4477c075-9151-49cc-bb52-82dc34ea46ec-kube-api-access-2w6q5\") pod \"cinder-api-0\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " pod="openstack/cinder-api-0" 
Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.533969 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 28 13:41:10 crc kubenswrapper[4857]: I1128 13:41:10.985123 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 13:41:10 crc kubenswrapper[4857]: W1128 13:41:10.991920 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4477c075_9151_49cc_bb52_82dc34ea46ec.slice/crio-8536aff5d0d5a3e5608298ae83482182f56a37bc4e05ab9d3c51b4609423a2b5 WatchSource:0}: Error finding container 8536aff5d0d5a3e5608298ae83482182f56a37bc4e05ab9d3c51b4609423a2b5: Status 404 returned error can't find the container with id 8536aff5d0d5a3e5608298ae83482182f56a37bc4e05ab9d3c51b4609423a2b5
Nov 28 13:41:11 crc kubenswrapper[4857]: I1128 13:41:11.102683 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82de4c9c-25af-49e1-9579-f51ad9a3d4ec","Type":"ContainerStarted","Data":"393dc477a432d224c8853c01ad6939e533a259e97bad1e67a93a0e7f77720a1c"}
Nov 28 13:41:11 crc kubenswrapper[4857]: I1128 13:41:11.108085 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4477c075-9151-49cc-bb52-82dc34ea46ec","Type":"ContainerStarted","Data":"8536aff5d0d5a3e5608298ae83482182f56a37bc4e05ab9d3c51b4609423a2b5"}
Nov 28 13:41:12 crc kubenswrapper[4857]: I1128 13:41:12.148388 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82de4c9c-25af-49e1-9579-f51ad9a3d4ec","Type":"ContainerStarted","Data":"d1daa3e59875008df55b2c17a293c9828d76d615b9a0e170a5e1f7112130e884"}
Nov 28 13:41:12 crc kubenswrapper[4857]: I1128 13:41:12.150586 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4477c075-9151-49cc-bb52-82dc34ea46ec","Type":"ContainerStarted","Data":"e24143c91b4a17a69c27afa164bb157bee14c4f0597ed2fa5ef6a42ffe793925"}
Nov 28 13:41:12 crc kubenswrapper[4857]: I1128 13:41:12.240506 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5f5cc64f6b-5tvl6"
Nov 28 13:41:12 crc kubenswrapper[4857]: I1128 13:41:12.532145 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5f5cc64f6b-5tvl6"
Nov 28 13:41:13 crc kubenswrapper[4857]: I1128 13:41:13.160784 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4477c075-9151-49cc-bb52-82dc34ea46ec","Type":"ContainerStarted","Data":"4dd7dcf6024fd47fb7c4424b294f5cadc4f936ab98e05bb09fe4f5e3d7651e94"}
Nov 28 13:41:13 crc kubenswrapper[4857]: I1128 13:41:13.161225 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Nov 28 13:41:13 crc kubenswrapper[4857]: I1128 13:41:13.163792 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82de4c9c-25af-49e1-9579-f51ad9a3d4ec","Type":"ContainerStarted","Data":"42fdf039da249011bb4f98ee0ccc5c75823dcc18060eaa7491fb2c3d14d51398"}
Nov 28 13:41:13 crc kubenswrapper[4857]: I1128 13:41:13.199670 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.199643798 podStartE2EDuration="3.199643798s" podCreationTimestamp="2025-11-28 13:41:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:41:13.18026958 +0000 UTC m=+1365.207644757" watchObservedRunningTime="2025-11-28 13:41:13.199643798 +0000 UTC m=+1365.227018965"
Nov 28 13:41:13 crc kubenswrapper[4857]: I1128 13:41:13.221005 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.9650072729999999 podStartE2EDuration="5.220980711s" podCreationTimestamp="2025-11-28 13:41:08 +0000 UTC" firstStartedPulling="2025-11-28 13:41:09.114369453 +0000 UTC m=+1361.141744620" lastFinishedPulling="2025-11-28 13:41:12.370342891 +0000 UTC m=+1364.397718058" observedRunningTime="2025-11-28 13:41:13.21294687 +0000 UTC m=+1365.240322037" watchObservedRunningTime="2025-11-28 13:41:13.220980711 +0000 UTC m=+1365.248355878"
Nov 28 13:41:13 crc kubenswrapper[4857]: I1128 13:41:13.426988 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6578955fd5-gcnr8"
Nov 28 13:41:13 crc kubenswrapper[4857]: I1128 13:41:13.505401 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-6c6sw"]
Nov 28 13:41:13 crc kubenswrapper[4857]: I1128 13:41:13.505619 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" podUID="e96f8a95-2dec-4981-876b-869966a67b72" containerName="dnsmasq-dns" containerID="cri-o://89da98a0c10b1faa48fafd6ba314782afe1ae3811d31a641ddb07be661fdbe5e" gracePeriod=10
Nov 28 13:41:13 crc kubenswrapper[4857]: I1128 13:41:13.652148 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Nov 28 13:41:13 crc kubenswrapper[4857]: I1128 13:41:13.711586 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 13:41:13 crc kubenswrapper[4857]: I1128 13:41:13.955335 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5b54986f64-dxw54"
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.175513 4857 generic.go:334] "Generic (PLEG): container finished" podID="e96f8a95-2dec-4981-876b-869966a67b72" containerID="89da98a0c10b1faa48fafd6ba314782afe1ae3811d31a641ddb07be661fdbe5e" exitCode=0
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.175691 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="607af733-25c0-43f4-8be8-328836908ca1" containerName="cinder-scheduler" containerID="cri-o://6651476b035972f0b29c30fd721cd5e597e49f641f8da373c883918d2676e924" gracePeriod=30
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.175941 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" event={"ID":"e96f8a95-2dec-4981-876b-869966a67b72","Type":"ContainerDied","Data":"89da98a0c10b1faa48fafd6ba314782afe1ae3811d31a641ddb07be661fdbe5e"}
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.175964 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" event={"ID":"e96f8a95-2dec-4981-876b-869966a67b72","Type":"ContainerDied","Data":"b45de0a46458e9f92807085970245ace4538255233f841beec59b789730b57f0"}
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.175976 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b45de0a46458e9f92807085970245ace4538255233f841beec59b789730b57f0"
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.180171 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="607af733-25c0-43f4-8be8-328836908ca1" containerName="probe" containerID="cri-o://04a1f1011ba9934a5850d4fcba66f3715df0a99289167cdc261fc44efebc3082" gracePeriod=30
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.182146 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.214481 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-6c6sw"
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.247086 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-config\") pod \"e96f8a95-2dec-4981-876b-869966a67b72\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") "
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.247135 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-dns-svc\") pod \"e96f8a95-2dec-4981-876b-869966a67b72\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") "
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.247178 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ncd4v\" (UniqueName: \"kubernetes.io/projected/e96f8a95-2dec-4981-876b-869966a67b72-kube-api-access-ncd4v\") pod \"e96f8a95-2dec-4981-876b-869966a67b72\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") "
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.247235 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-ovsdbserver-nb\") pod \"e96f8a95-2dec-4981-876b-869966a67b72\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") "
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.247255 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-ovsdbserver-sb\") pod \"e96f8a95-2dec-4981-876b-869966a67b72\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") "
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.354445 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-dns-swift-storage-0\") pod \"e96f8a95-2dec-4981-876b-869966a67b72\" (UID: \"e96f8a95-2dec-4981-876b-869966a67b72\") "
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.366688 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e96f8a95-2dec-4981-876b-869966a67b72-kube-api-access-ncd4v" (OuterVolumeSpecName: "kube-api-access-ncd4v") pod "e96f8a95-2dec-4981-876b-869966a67b72" (UID: "e96f8a95-2dec-4981-876b-869966a67b72"). InnerVolumeSpecName "kube-api-access-ncd4v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.398847 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-config" (OuterVolumeSpecName: "config") pod "e96f8a95-2dec-4981-876b-869966a67b72" (UID: "e96f8a95-2dec-4981-876b-869966a67b72"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.444950 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e96f8a95-2dec-4981-876b-869966a67b72" (UID: "e96f8a95-2dec-4981-876b-869966a67b72"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.460382 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-config\") on node \"crc\" DevicePath \"\""
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.460408 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.460417 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ncd4v\" (UniqueName: \"kubernetes.io/projected/e96f8a95-2dec-4981-876b-869966a67b72-kube-api-access-ncd4v\") on node \"crc\" DevicePath \"\""
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.550483 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e96f8a95-2dec-4981-876b-869966a67b72" (UID: "e96f8a95-2dec-4981-876b-869966a67b72"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.562066 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.586347 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e96f8a95-2dec-4981-876b-869966a67b72" (UID: "e96f8a95-2dec-4981-876b-869966a67b72"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.656204 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e96f8a95-2dec-4981-876b-869966a67b72" (UID: "e96f8a95-2dec-4981-876b-869966a67b72"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.665807 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 28 13:41:14 crc kubenswrapper[4857]: I1128 13:41:14.665841 4857 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e96f8a95-2dec-4981-876b-869966a67b72-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 28 13:41:15 crc kubenswrapper[4857]: I1128 13:41:15.182953 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-6c6sw"
Nov 28 13:41:15 crc kubenswrapper[4857]: I1128 13:41:15.218527 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-6c6sw"]
Nov 28 13:41:15 crc kubenswrapper[4857]: I1128 13:41:15.238652 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-6c6sw"]
Nov 28 13:41:15 crc kubenswrapper[4857]: I1128 13:41:15.693638 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-749fd8cf96-rbd6r"
Nov 28 13:41:15 crc kubenswrapper[4857]: I1128 13:41:15.835449 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-749fd8cf96-rbd6r"
Nov 28 13:41:16 crc kubenswrapper[4857]: I1128 13:41:16.193878 4857 generic.go:334] "Generic (PLEG): container finished" podID="607af733-25c0-43f4-8be8-328836908ca1" containerID="04a1f1011ba9934a5850d4fcba66f3715df0a99289167cdc261fc44efebc3082" exitCode=0
Nov 28 13:41:16 crc kubenswrapper[4857]: I1128 13:41:16.194669 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"607af733-25c0-43f4-8be8-328836908ca1","Type":"ContainerDied","Data":"04a1f1011ba9934a5850d4fcba66f3715df0a99289167cdc261fc44efebc3082"}
Nov 28 13:41:16 crc kubenswrapper[4857]: I1128 13:41:16.321097 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e96f8a95-2dec-4981-876b-869966a67b72" path="/var/lib/kubelet/pods/e96f8a95-2dec-4981-876b-869966a67b72/volumes"
Nov 28 13:41:16 crc kubenswrapper[4857]: I1128 13:41:16.448218 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7d4894d65-gqnvs"
Nov 28 13:41:16 crc kubenswrapper[4857]: I1128 13:41:16.521705 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5b54986f64-dxw54"]
Nov 28 13:41:16 crc kubenswrapper[4857]: I1128 13:41:16.521977 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5b54986f64-dxw54" podUID="3fcb638a-dab8-414e-9d24-e49c8437672d" containerName="neutron-api" containerID="cri-o://c859c94bc4ee582de35c29d3a653e1f9ca193e86a0cc31648fb667168c21e37b" gracePeriod=30
Nov 28 13:41:16 crc kubenswrapper[4857]: I1128 13:41:16.522460 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5b54986f64-dxw54" podUID="3fcb638a-dab8-414e-9d24-e49c8437672d" containerName="neutron-httpd" containerID="cri-o://1b7fd768b948dcb6059f83665784c09b36fe6390b7629371594c8c3421176880" gracePeriod=30
Nov 28 13:41:16 crc kubenswrapper[4857]: I1128 13:41:16.601266 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 28 13:41:17 crc kubenswrapper[4857]: I1128 13:41:17.205129 4857 generic.go:334] "Generic (PLEG): container finished" podID="3fcb638a-dab8-414e-9d24-e49c8437672d" containerID="1b7fd768b948dcb6059f83665784c09b36fe6390b7629371594c8c3421176880" exitCode=0
Nov 28 13:41:17 crc kubenswrapper[4857]: I1128 13:41:17.205177 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5b54986f64-dxw54" event={"ID":"3fcb638a-dab8-414e-9d24-e49c8437672d","Type":"ContainerDied","Data":"1b7fd768b948dcb6059f83665784c09b36fe6390b7629371594c8c3421176880"}
Nov 28 13:41:18 crc kubenswrapper[4857]: I1128 13:41:18.714962 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 13:41:18 crc kubenswrapper[4857]: I1128 13:41:18.790361 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-5f4cb87f5f-m76pk"
Nov 28 13:41:18 crc kubenswrapper[4857]: I1128 13:41:18.833604 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6t9xv\" (UniqueName: \"kubernetes.io/projected/607af733-25c0-43f4-8be8-328836908ca1-kube-api-access-6t9xv\") pod \"607af733-25c0-43f4-8be8-328836908ca1\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") "
Nov 28 13:41:18 crc kubenswrapper[4857]: I1128 13:41:18.833734 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-combined-ca-bundle\") pod \"607af733-25c0-43f4-8be8-328836908ca1\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") "
Nov 28 13:41:18 crc kubenswrapper[4857]: I1128 13:41:18.834900 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-scripts\") pod \"607af733-25c0-43f4-8be8-328836908ca1\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") "
Nov 28 13:41:18 crc kubenswrapper[4857]: I1128 13:41:18.834951 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-config-data\") pod \"607af733-25c0-43f4-8be8-328836908ca1\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") "
Nov 28 13:41:18 crc kubenswrapper[4857]: I1128 13:41:18.834978 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/607af733-25c0-43f4-8be8-328836908ca1-etc-machine-id\") pod \"607af733-25c0-43f4-8be8-328836908ca1\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") "
Nov 28 13:41:18 crc kubenswrapper[4857]: I1128 13:41:18.835022 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-config-data-custom\") pod \"607af733-25c0-43f4-8be8-328836908ca1\" (UID: \"607af733-25c0-43f4-8be8-328836908ca1\") "
Nov 28 13:41:18 crc kubenswrapper[4857]: I1128 13:41:18.839088 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/607af733-25c0-43f4-8be8-328836908ca1-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "607af733-25c0-43f4-8be8-328836908ca1" (UID: "607af733-25c0-43f4-8be8-328836908ca1"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 13:41:18 crc kubenswrapper[4857]: I1128 13:41:18.842421 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "607af733-25c0-43f4-8be8-328836908ca1" (UID: "607af733-25c0-43f4-8be8-328836908ca1"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:41:18 crc kubenswrapper[4857]: I1128 13:41:18.843668 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/607af733-25c0-43f4-8be8-328836908ca1-kube-api-access-6t9xv" (OuterVolumeSpecName: "kube-api-access-6t9xv") pod "607af733-25c0-43f4-8be8-328836908ca1" (UID: "607af733-25c0-43f4-8be8-328836908ca1"). InnerVolumeSpecName "kube-api-access-6t9xv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:41:18 crc kubenswrapper[4857]: I1128 13:41:18.865336 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-scripts" (OuterVolumeSpecName: "scripts") pod "607af733-25c0-43f4-8be8-328836908ca1" (UID: "607af733-25c0-43f4-8be8-328836908ca1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:41:18 crc kubenswrapper[4857]: I1128 13:41:18.933911 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "607af733-25c0-43f4-8be8-328836908ca1" (UID: "607af733-25c0-43f4-8be8-328836908ca1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:41:18 crc kubenswrapper[4857]: I1128 13:41:18.936695 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6t9xv\" (UniqueName: \"kubernetes.io/projected/607af733-25c0-43f4-8be8-328836908ca1-kube-api-access-6t9xv\") on node \"crc\" DevicePath \"\""
Nov 28 13:41:18 crc kubenswrapper[4857]: I1128 13:41:18.936719 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:41:18 crc kubenswrapper[4857]: I1128 13:41:18.936728 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 13:41:18 crc kubenswrapper[4857]: I1128 13:41:18.936737 4857 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/607af733-25c0-43f4-8be8-328836908ca1-etc-machine-id\") on node \"crc\" DevicePath \"\""
Nov 28 13:41:18 crc kubenswrapper[4857]: I1128 13:41:18.936760 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 28 13:41:18 crc kubenswrapper[4857]: I1128 13:41:18.941730 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6b7b667979-6c6sw" podUID="e96f8a95-2dec-4981-876b-869966a67b72" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.151:5353: i/o timeout"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.046953 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-config-data" (OuterVolumeSpecName: "config-data") pod "607af733-25c0-43f4-8be8-328836908ca1" (UID: "607af733-25c0-43f4-8be8-328836908ca1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.141429 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/607af733-25c0-43f4-8be8-328836908ca1-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.232076 4857 generic.go:334] "Generic (PLEG): container finished" podID="607af733-25c0-43f4-8be8-328836908ca1" containerID="6651476b035972f0b29c30fd721cd5e597e49f641f8da373c883918d2676e924" exitCode=0
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.232125 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"607af733-25c0-43f4-8be8-328836908ca1","Type":"ContainerDied","Data":"6651476b035972f0b29c30fd721cd5e597e49f641f8da373c883918d2676e924"}
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.232158 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"607af733-25c0-43f4-8be8-328836908ca1","Type":"ContainerDied","Data":"7601d9063bdf9e7b2ac1ac8c59b824b649c3ec3d61244763c07c03a4759dbb7a"}
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.232175 4857 scope.go:117] "RemoveContainer" containerID="04a1f1011ba9934a5850d4fcba66f3715df0a99289167cdc261fc44efebc3082"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.232307 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.317927 4857 scope.go:117] "RemoveContainer" containerID="6651476b035972f0b29c30fd721cd5e597e49f641f8da373c883918d2676e924"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.320262 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.328811 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.350784 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 13:41:19 crc kubenswrapper[4857]: E1128 13:41:19.351380 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="607af733-25c0-43f4-8be8-328836908ca1" containerName="cinder-scheduler"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.351491 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="607af733-25c0-43f4-8be8-328836908ca1" containerName="cinder-scheduler"
Nov 28 13:41:19 crc kubenswrapper[4857]: E1128 13:41:19.351558 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e96f8a95-2dec-4981-876b-869966a67b72" containerName="dnsmasq-dns"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.351610 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e96f8a95-2dec-4981-876b-869966a67b72" containerName="dnsmasq-dns"
Nov 28 13:41:19 crc kubenswrapper[4857]: E1128 13:41:19.351696 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="607af733-25c0-43f4-8be8-328836908ca1" containerName="probe"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.351859 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="607af733-25c0-43f4-8be8-328836908ca1" containerName="probe"
Nov 28 13:41:19 crc kubenswrapper[4857]: E1128 13:41:19.351967 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e96f8a95-2dec-4981-876b-869966a67b72" containerName="init"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.352049 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e96f8a95-2dec-4981-876b-869966a67b72" containerName="init"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.352283 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="607af733-25c0-43f4-8be8-328836908ca1" containerName="probe"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.352363 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e96f8a95-2dec-4981-876b-869966a67b72" containerName="dnsmasq-dns"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.352445 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="607af733-25c0-43f4-8be8-328836908ca1" containerName="cinder-scheduler"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.353512 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.356329 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.360120 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.378299 4857 scope.go:117] "RemoveContainer" containerID="04a1f1011ba9934a5850d4fcba66f3715df0a99289167cdc261fc44efebc3082"
Nov 28 13:41:19 crc kubenswrapper[4857]: E1128 13:41:19.386978 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"04a1f1011ba9934a5850d4fcba66f3715df0a99289167cdc261fc44efebc3082\": container with ID starting with 04a1f1011ba9934a5850d4fcba66f3715df0a99289167cdc261fc44efebc3082 not found: ID does not exist" containerID="04a1f1011ba9934a5850d4fcba66f3715df0a99289167cdc261fc44efebc3082"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.387018 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04a1f1011ba9934a5850d4fcba66f3715df0a99289167cdc261fc44efebc3082"} err="failed to get container status \"04a1f1011ba9934a5850d4fcba66f3715df0a99289167cdc261fc44efebc3082\": rpc error: code = NotFound desc = could not find container \"04a1f1011ba9934a5850d4fcba66f3715df0a99289167cdc261fc44efebc3082\": container with ID starting with 04a1f1011ba9934a5850d4fcba66f3715df0a99289167cdc261fc44efebc3082 not found: ID does not exist"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.387043 4857 scope.go:117] "RemoveContainer" containerID="6651476b035972f0b29c30fd721cd5e597e49f641f8da373c883918d2676e924"
Nov 28 13:41:19 crc kubenswrapper[4857]: E1128 13:41:19.387548 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6651476b035972f0b29c30fd721cd5e597e49f641f8da373c883918d2676e924\": container with ID starting with 6651476b035972f0b29c30fd721cd5e597e49f641f8da373c883918d2676e924 not found: ID does not exist" containerID="6651476b035972f0b29c30fd721cd5e597e49f641f8da373c883918d2676e924"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.387578 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6651476b035972f0b29c30fd721cd5e597e49f641f8da373c883918d2676e924"} err="failed to get container status \"6651476b035972f0b29c30fd721cd5e597e49f641f8da373c883918d2676e924\": rpc error: code = NotFound desc = could not find container \"6651476b035972f0b29c30fd721cd5e597e49f641f8da373c883918d2676e924\": container with ID starting with 6651476b035972f0b29c30fd721cd5e597e49f641f8da373c883918d2676e924 not found: ID does not exist"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.446884 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-scripts\") pod \"cinder-scheduler-0\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " pod="openstack/cinder-scheduler-0"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.446988 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " pod="openstack/cinder-scheduler-0"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.447089 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a53cec78-89c3-4495-8af6-4caf4f018cc1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " pod="openstack/cinder-scheduler-0"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.447129 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h72g5\" (UniqueName: \"kubernetes.io/projected/a53cec78-89c3-4495-8af6-4caf4f018cc1-kube-api-access-h72g5\") pod \"cinder-scheduler-0\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " pod="openstack/cinder-scheduler-0"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.447162 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-config-data\") pod \"cinder-scheduler-0\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " pod="openstack/cinder-scheduler-0"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.447217 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " pod="openstack/cinder-scheduler-0"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.548605 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a53cec78-89c3-4495-8af6-4caf4f018cc1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " pod="openstack/cinder-scheduler-0"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.548671 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h72g5\" (UniqueName: \"kubernetes.io/projected/a53cec78-89c3-4495-8af6-4caf4f018cc1-kube-api-access-h72g5\") pod \"cinder-scheduler-0\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " pod="openstack/cinder-scheduler-0"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.548716 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-config-data\") pod \"cinder-scheduler-0\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " pod="openstack/cinder-scheduler-0"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.548732 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a53cec78-89c3-4495-8af6-4caf4f018cc1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " pod="openstack/cinder-scheduler-0"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.548794 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " pod="openstack/cinder-scheduler-0"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.549008 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-scripts\") pod \"cinder-scheduler-0\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " pod="openstack/cinder-scheduler-0"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.549061 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " pod="openstack/cinder-scheduler-0"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.553583 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " pod="openstack/cinder-scheduler-0"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.554310 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-scripts\") pod \"cinder-scheduler-0\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " pod="openstack/cinder-scheduler-0"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.554488 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " pod="openstack/cinder-scheduler-0"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.555850 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-config-data\") pod \"cinder-scheduler-0\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " pod="openstack/cinder-scheduler-0"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.569439 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h72g5\" (UniqueName: \"kubernetes.io/projected/a53cec78-89c3-4495-8af6-4caf4f018cc1-kube-api-access-h72g5\") pod \"cinder-scheduler-0\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " pod="openstack/cinder-scheduler-0"
Nov 28 13:41:19 crc kubenswrapper[4857]: I1128 13:41:19.686544 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 13:41:20 crc kubenswrapper[4857]: I1128 13:41:20.160581 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 13:41:20 crc kubenswrapper[4857]: I1128 13:41:20.246091 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"a53cec78-89c3-4495-8af6-4caf4f018cc1","Type":"ContainerStarted","Data":"eae65d10eff323342c881919bd6d872994f4a98e32c7f45bf6b6f9513af9dc8e"}
Nov 28 13:41:20 crc kubenswrapper[4857]: I1128 13:41:20.343915 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="607af733-25c0-43f4-8be8-328836908ca1" path="/var/lib/kubelet/pods/607af733-25c0-43f4-8be8-328836908ca1/volumes"
Nov 28 13:41:20 crc kubenswrapper[4857]: I1128 13:41:20.383826 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-575548d9c6-4zx6z"
Nov 28 13:41:20 crc kubenswrapper[4857]: I1128 13:41:20.881659 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-575548d9c6-4zx6z"
Nov 28 13:41:20 crc kubenswrapper[4857]: I1128 13:41:20.967906 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5f5cc64f6b-5tvl6"]
Nov 28 13:41:20 crc kubenswrapper[4857]: I1128 13:41:20.968176 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5f5cc64f6b-5tvl6" podUID="fa95429a-5622-4bcb-a065-8ff916c55bb9" containerName="barbican-api-log" containerID="cri-o://596175288a06a7a15c708b2eee248bdbe809f62ad2e84d79070f3281ec4e96d0" gracePeriod=30
Nov 28 13:41:20 crc kubenswrapper[4857]: I1128 13:41:20.968341 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5f5cc64f6b-5tvl6" podUID="fa95429a-5622-4bcb-a065-8ff916c55bb9" containerName="barbican-api" containerID="cri-o://4afcfea2a87b26b9b4261f7f29aa83d6eaf1b3dba1540e3bfbfdd796e9be994d" gracePeriod=30
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.012514 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"]
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.014443 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.017221 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-nds47"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.017511 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.017949 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.031143 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.090209 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/715273a2-99a1-4c76-9d65-f1f31770ec14-combined-ca-bundle\") pod \"openstackclient\" (UID: \"715273a2-99a1-4c76-9d65-f1f31770ec14\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.090275 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bfcx9\" (UniqueName: \"kubernetes.io/projected/715273a2-99a1-4c76-9d65-f1f31770ec14-kube-api-access-bfcx9\") pod \"openstackclient\" (UID: \"715273a2-99a1-4c76-9d65-f1f31770ec14\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.090307 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/715273a2-99a1-4c76-9d65-f1f31770ec14-openstack-config\") pod \"openstackclient\" (UID: \"715273a2-99a1-4c76-9d65-f1f31770ec14\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.090330 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/715273a2-99a1-4c76-9d65-f1f31770ec14-openstack-config-secret\") pod \"openstackclient\" (UID: \"715273a2-99a1-4c76-9d65-f1f31770ec14\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.204977 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/715273a2-99a1-4c76-9d65-f1f31770ec14-combined-ca-bundle\") pod \"openstackclient\" (UID: \"715273a2-99a1-4c76-9d65-f1f31770ec14\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.206182 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bfcx9\" (UniqueName: \"kubernetes.io/projected/715273a2-99a1-4c76-9d65-f1f31770ec14-kube-api-access-bfcx9\") pod \"openstackclient\" (UID: \"715273a2-99a1-4c76-9d65-f1f31770ec14\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.206288 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/715273a2-99a1-4c76-9d65-f1f31770ec14-openstack-config\") pod \"openstackclient\" (UID: \"715273a2-99a1-4c76-9d65-f1f31770ec14\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.206374 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/715273a2-99a1-4c76-9d65-f1f31770ec14-openstack-config-secret\") pod \"openstackclient\" (UID: \"715273a2-99a1-4c76-9d65-f1f31770ec14\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.209822 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/715273a2-99a1-4c76-9d65-f1f31770ec14-openstack-config\") pod \"openstackclient\" (UID: \"715273a2-99a1-4c76-9d65-f1f31770ec14\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.210489 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/715273a2-99a1-4c76-9d65-f1f31770ec14-combined-ca-bundle\") pod \"openstackclient\" (UID: \"715273a2-99a1-4c76-9d65-f1f31770ec14\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.218173 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/715273a2-99a1-4c76-9d65-f1f31770ec14-openstack-config-secret\") pod \"openstackclient\" (UID: \"715273a2-99a1-4c76-9d65-f1f31770ec14\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.237629 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bfcx9\" (UniqueName: \"kubernetes.io/projected/715273a2-99a1-4c76-9d65-f1f31770ec14-kube-api-access-bfcx9\") pod \"openstackclient\" (UID: \"715273a2-99a1-4c76-9d65-f1f31770ec14\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.296131 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"a53cec78-89c3-4495-8af6-4caf4f018cc1","Type":"ContainerStarted","Data":"2b1f1cfc83df026dae7bf7bf7c447aef3e892986be530a90b20b933b8fe1c77c"}
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.300340 4857 generic.go:334] "Generic (PLEG): container finished" podID="fa95429a-5622-4bcb-a065-8ff916c55bb9" containerID="596175288a06a7a15c708b2eee248bdbe809f62ad2e84d79070f3281ec4e96d0" exitCode=143
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.301117 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5f5cc64f6b-5tvl6" event={"ID":"fa95429a-5622-4bcb-a065-8ff916c55bb9","Type":"ContainerDied","Data":"596175288a06a7a15c708b2eee248bdbe809f62ad2e84d79070f3281ec4e96d0"}
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.335883 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.342797 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"]
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.362959 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"]
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.371053 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"]
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.372574 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.387950 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.522737 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xkvx\" (UniqueName: \"kubernetes.io/projected/07621208-d831-4470-908c-76084c830753-kube-api-access-9xkvx\") pod \"openstackclient\" (UID: \"07621208-d831-4470-908c-76084c830753\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.523088 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/07621208-d831-4470-908c-76084c830753-openstack-config-secret\") pod \"openstackclient\" (UID: \"07621208-d831-4470-908c-76084c830753\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.523155 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/07621208-d831-4470-908c-76084c830753-openstack-config\") pod \"openstackclient\" (UID: \"07621208-d831-4470-908c-76084c830753\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.523197 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07621208-d831-4470-908c-76084c830753-combined-ca-bundle\") pod \"openstackclient\" (UID: \"07621208-d831-4470-908c-76084c830753\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.624620 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xkvx\" (UniqueName: \"kubernetes.io/projected/07621208-d831-4470-908c-76084c830753-kube-api-access-9xkvx\") pod \"openstackclient\" (UID: \"07621208-d831-4470-908c-76084c830753\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.624688 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/07621208-d831-4470-908c-76084c830753-openstack-config-secret\") pod \"openstackclient\" (UID: \"07621208-d831-4470-908c-76084c830753\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.624738 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/07621208-d831-4470-908c-76084c830753-openstack-config\") pod \"openstackclient\" (UID: \"07621208-d831-4470-908c-76084c830753\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.624783 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07621208-d831-4470-908c-76084c830753-combined-ca-bundle\") pod \"openstackclient\" (UID: \"07621208-d831-4470-908c-76084c830753\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.626337 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/07621208-d831-4470-908c-76084c830753-openstack-config\") pod \"openstackclient\" (UID: \"07621208-d831-4470-908c-76084c830753\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.637327 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/07621208-d831-4470-908c-76084c830753-openstack-config-secret\") pod \"openstackclient\" (UID: \"07621208-d831-4470-908c-76084c830753\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: E1128 13:41:21.638978 4857 log.go:32] "RunPodSandbox from runtime service failed" err=<
Nov 28 13:41:21 crc kubenswrapper[4857]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_715273a2-99a1-4c76-9d65-f1f31770ec14_0(c150814c3282d291a4c2ed0f0e5077c749dadce304e630c8f10b902738345b6e): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"c150814c3282d291a4c2ed0f0e5077c749dadce304e630c8f10b902738345b6e" Netns:"/var/run/netns/e059b0a9-437a-4545-9ecb-279e8b023310" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=c150814c3282d291a4c2ed0f0e5077c749dadce304e630c8f10b902738345b6e;K8S_POD_UID=715273a2-99a1-4c76-9d65-f1f31770ec14" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/715273a2-99a1-4c76-9d65-f1f31770ec14]: expected pod UID "715273a2-99a1-4c76-9d65-f1f31770ec14" but got "07621208-d831-4470-908c-76084c830753" from Kube API
Nov 28 13:41:21 crc kubenswrapper[4857]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"}
Nov 28 13:41:21 crc kubenswrapper[4857]: >
Nov 28 13:41:21 crc kubenswrapper[4857]: E1128 13:41:21.639026 4857 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=<
Nov 28 13:41:21 crc kubenswrapper[4857]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_715273a2-99a1-4c76-9d65-f1f31770ec14_0(c150814c3282d291a4c2ed0f0e5077c749dadce304e630c8f10b902738345b6e): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"c150814c3282d291a4c2ed0f0e5077c749dadce304e630c8f10b902738345b6e" Netns:"/var/run/netns/e059b0a9-437a-4545-9ecb-279e8b023310" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=c150814c3282d291a4c2ed0f0e5077c749dadce304e630c8f10b902738345b6e;K8S_POD_UID=715273a2-99a1-4c76-9d65-f1f31770ec14" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/715273a2-99a1-4c76-9d65-f1f31770ec14]: expected pod UID "715273a2-99a1-4c76-9d65-f1f31770ec14" but got "07621208-d831-4470-908c-76084c830753" from Kube API
Nov 28 13:41:21 crc kubenswrapper[4857]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"}
Nov 28 13:41:21 crc kubenswrapper[4857]: > pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.652366 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07621208-d831-4470-908c-76084c830753-combined-ca-bundle\") pod \"openstackclient\" (UID: \"07621208-d831-4470-908c-76084c830753\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.653071 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xkvx\" (UniqueName: \"kubernetes.io/projected/07621208-d831-4470-908c-76084c830753-kube-api-access-9xkvx\") pod \"openstackclient\" (UID: \"07621208-d831-4470-908c-76084c830753\") " pod="openstack/openstackclient"
Nov 28 13:41:21 crc kubenswrapper[4857]: I1128 13:41:21.706512 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.219311 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.312476 4857 generic.go:334] "Generic (PLEG): container finished" podID="3fcb638a-dab8-414e-9d24-e49c8437672d" containerID="c859c94bc4ee582de35c29d3a653e1f9ca193e86a0cc31648fb667168c21e37b" exitCode=0
Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.327156 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5b54986f64-dxw54" event={"ID":"3fcb638a-dab8-414e-9d24-e49c8437672d","Type":"ContainerDied","Data":"c859c94bc4ee582de35c29d3a653e1f9ca193e86a0cc31648fb667168c21e37b"}
Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.327188 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"07621208-d831-4470-908c-76084c830753","Type":"ContainerStarted","Data":"68779f74778f2879e5c9e4eb3f071d792203766ea32dbd8814522231938c623c"}
Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.333630 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.334637 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"a53cec78-89c3-4495-8af6-4caf4f018cc1","Type":"ContainerStarted","Data":"5ac6adaa76a02bc0a74df277af75098ac24f5239a90ad5f23966871efb74d2a3"}
Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.350731 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.358083 4857 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="715273a2-99a1-4c76-9d65-f1f31770ec14" podUID="07621208-d831-4470-908c-76084c830753"
Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.364553 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.36454235 podStartE2EDuration="3.36454235s" podCreationTimestamp="2025-11-28 13:41:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:41:22.355774468 +0000 UTC m=+1374.383149635" watchObservedRunningTime="2025-11-28 13:41:22.36454235 +0000 UTC m=+1374.391917517"
Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.440875 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/715273a2-99a1-4c76-9d65-f1f31770ec14-combined-ca-bundle\") pod \"715273a2-99a1-4c76-9d65-f1f31770ec14\" (UID: \"715273a2-99a1-4c76-9d65-f1f31770ec14\") "
Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.440991 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/715273a2-99a1-4c76-9d65-f1f31770ec14-openstack-config-secret\") pod \"715273a2-99a1-4c76-9d65-f1f31770ec14\" (UID: \"715273a2-99a1-4c76-9d65-f1f31770ec14\") "
Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.441028 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bfcx9\" (UniqueName: \"kubernetes.io/projected/715273a2-99a1-4c76-9d65-f1f31770ec14-kube-api-access-bfcx9\") pod \"715273a2-99a1-4c76-9d65-f1f31770ec14\" (UID: \"715273a2-99a1-4c76-9d65-f1f31770ec14\") "
Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.441047 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/715273a2-99a1-4c76-9d65-f1f31770ec14-openstack-config\") pod \"715273a2-99a1-4c76-9d65-f1f31770ec14\" (UID: \"715273a2-99a1-4c76-9d65-f1f31770ec14\") "
Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.444498 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/715273a2-99a1-4c76-9d65-f1f31770ec14-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "715273a2-99a1-4c76-9d65-f1f31770ec14" (UID: "715273a2-99a1-4c76-9d65-f1f31770ec14"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.448705 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/715273a2-99a1-4c76-9d65-f1f31770ec14-kube-api-access-bfcx9" (OuterVolumeSpecName: "kube-api-access-bfcx9") pod "715273a2-99a1-4c76-9d65-f1f31770ec14" (UID: "715273a2-99a1-4c76-9d65-f1f31770ec14"). InnerVolumeSpecName "kube-api-access-bfcx9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.448924 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/715273a2-99a1-4c76-9d65-f1f31770ec14-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "715273a2-99a1-4c76-9d65-f1f31770ec14" (UID: "715273a2-99a1-4c76-9d65-f1f31770ec14"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.469207 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/715273a2-99a1-4c76-9d65-f1f31770ec14-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "715273a2-99a1-4c76-9d65-f1f31770ec14" (UID: "715273a2-99a1-4c76-9d65-f1f31770ec14"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.543436 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/715273a2-99a1-4c76-9d65-f1f31770ec14-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.543722 4857 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/715273a2-99a1-4c76-9d65-f1f31770ec14-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.543735 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bfcx9\" (UniqueName: \"kubernetes.io/projected/715273a2-99a1-4c76-9d65-f1f31770ec14-kube-api-access-bfcx9\") on node \"crc\" DevicePath \"\""
Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.543744 4857 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/715273a2-99a1-4c76-9d65-f1f31770ec14-openstack-config\") on node \"crc\" DevicePath \"\""
Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.726466 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5b54986f64-dxw54" Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.847552 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-config\") pod \"3fcb638a-dab8-414e-9d24-e49c8437672d\" (UID: \"3fcb638a-dab8-414e-9d24-e49c8437672d\") " Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.847696 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-ovndb-tls-certs\") pod \"3fcb638a-dab8-414e-9d24-e49c8437672d\" (UID: \"3fcb638a-dab8-414e-9d24-e49c8437672d\") " Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.847844 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-httpd-config\") pod \"3fcb638a-dab8-414e-9d24-e49c8437672d\" (UID: \"3fcb638a-dab8-414e-9d24-e49c8437672d\") " Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.847899 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vwn48\" (UniqueName: \"kubernetes.io/projected/3fcb638a-dab8-414e-9d24-e49c8437672d-kube-api-access-vwn48\") pod \"3fcb638a-dab8-414e-9d24-e49c8437672d\" (UID: \"3fcb638a-dab8-414e-9d24-e49c8437672d\") " Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.847920 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-combined-ca-bundle\") pod \"3fcb638a-dab8-414e-9d24-e49c8437672d\" (UID: \"3fcb638a-dab8-414e-9d24-e49c8437672d\") " Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.857957 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3fcb638a-dab8-414e-9d24-e49c8437672d-kube-api-access-vwn48" (OuterVolumeSpecName: "kube-api-access-vwn48") pod "3fcb638a-dab8-414e-9d24-e49c8437672d" (UID: "3fcb638a-dab8-414e-9d24-e49c8437672d"). InnerVolumeSpecName "kube-api-access-vwn48". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.859845 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "3fcb638a-dab8-414e-9d24-e49c8437672d" (UID: "3fcb638a-dab8-414e-9d24-e49c8437672d"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.902714 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3fcb638a-dab8-414e-9d24-e49c8437672d" (UID: "3fcb638a-dab8-414e-9d24-e49c8437672d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.922825 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-config" (OuterVolumeSpecName: "config") pod "3fcb638a-dab8-414e-9d24-e49c8437672d" (UID: "3fcb638a-dab8-414e-9d24-e49c8437672d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.935339 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "3fcb638a-dab8-414e-9d24-e49c8437672d" (UID: "3fcb638a-dab8-414e-9d24-e49c8437672d"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.949481 4857 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.949517 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vwn48\" (UniqueName: \"kubernetes.io/projected/3fcb638a-dab8-414e-9d24-e49c8437672d-kube-api-access-vwn48\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.949544 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.949554 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:22 crc kubenswrapper[4857]: I1128 13:41:22.949561 4857 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3fcb638a-dab8-414e-9d24-e49c8437672d-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:23 crc kubenswrapper[4857]: I1128 13:41:23.351258 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 13:41:23 crc kubenswrapper[4857]: I1128 13:41:23.351415 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5b54986f64-dxw54" Nov 28 13:41:23 crc kubenswrapper[4857]: I1128 13:41:23.351461 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5b54986f64-dxw54" event={"ID":"3fcb638a-dab8-414e-9d24-e49c8437672d","Type":"ContainerDied","Data":"8500d968792fa6ebcdfd06932333ee1050adfe957118738a54feed91470eda76"} Nov 28 13:41:23 crc kubenswrapper[4857]: I1128 13:41:23.351867 4857 scope.go:117] "RemoveContainer" containerID="1b7fd768b948dcb6059f83665784c09b36fe6390b7629371594c8c3421176880" Nov 28 13:41:23 crc kubenswrapper[4857]: I1128 13:41:23.367787 4857 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="715273a2-99a1-4c76-9d65-f1f31770ec14" podUID="07621208-d831-4470-908c-76084c830753" Nov 28 13:41:23 crc kubenswrapper[4857]: I1128 13:41:23.390107 4857 scope.go:117] "RemoveContainer" containerID="c859c94bc4ee582de35c29d3a653e1f9ca193e86a0cc31648fb667168c21e37b" Nov 28 13:41:23 crc kubenswrapper[4857]: I1128 13:41:23.392812 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5b54986f64-dxw54"] Nov 28 13:41:23 crc kubenswrapper[4857]: I1128 13:41:23.401467 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5b54986f64-dxw54"] Nov 28 13:41:23 crc kubenswrapper[4857]: I1128 13:41:23.970444 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 28 13:41:24 crc kubenswrapper[4857]: I1128 13:41:24.322557 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3fcb638a-dab8-414e-9d24-e49c8437672d" path="/var/lib/kubelet/pods/3fcb638a-dab8-414e-9d24-e49c8437672d/volumes" Nov 28 13:41:24 crc kubenswrapper[4857]: I1128 13:41:24.323388 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="715273a2-99a1-4c76-9d65-f1f31770ec14" path="/var/lib/kubelet/pods/715273a2-99a1-4c76-9d65-f1f31770ec14/volumes" Nov 28 13:41:24 crc kubenswrapper[4857]: I1128 13:41:24.687265 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 28 13:41:24 crc kubenswrapper[4857]: I1128 13:41:24.718801 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5f5cc64f6b-5tvl6" Nov 28 13:41:24 crc kubenswrapper[4857]: I1128 13:41:24.781208 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2r7l4\" (UniqueName: \"kubernetes.io/projected/fa95429a-5622-4bcb-a065-8ff916c55bb9-kube-api-access-2r7l4\") pod \"fa95429a-5622-4bcb-a065-8ff916c55bb9\" (UID: \"fa95429a-5622-4bcb-a065-8ff916c55bb9\") " Nov 28 13:41:24 crc kubenswrapper[4857]: I1128 13:41:24.781360 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa95429a-5622-4bcb-a065-8ff916c55bb9-combined-ca-bundle\") pod \"fa95429a-5622-4bcb-a065-8ff916c55bb9\" (UID: \"fa95429a-5622-4bcb-a065-8ff916c55bb9\") " Nov 28 13:41:24 crc kubenswrapper[4857]: I1128 13:41:24.781409 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fa95429a-5622-4bcb-a065-8ff916c55bb9-config-data-custom\") pod \"fa95429a-5622-4bcb-a065-8ff916c55bb9\" (UID: \"fa95429a-5622-4bcb-a065-8ff916c55bb9\") " Nov 28 13:41:24 crc kubenswrapper[4857]: I1128 13:41:24.781439 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fa95429a-5622-4bcb-a065-8ff916c55bb9-logs\") pod \"fa95429a-5622-4bcb-a065-8ff916c55bb9\" (UID: \"fa95429a-5622-4bcb-a065-8ff916c55bb9\") " Nov 28 13:41:24 crc kubenswrapper[4857]: I1128 13:41:24.781473 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa95429a-5622-4bcb-a065-8ff916c55bb9-config-data\") pod \"fa95429a-5622-4bcb-a065-8ff916c55bb9\" (UID: \"fa95429a-5622-4bcb-a065-8ff916c55bb9\") " Nov 28 13:41:24 crc kubenswrapper[4857]: I1128 13:41:24.782525 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa95429a-5622-4bcb-a065-8ff916c55bb9-logs" (OuterVolumeSpecName: "logs") pod "fa95429a-5622-4bcb-a065-8ff916c55bb9" (UID: "fa95429a-5622-4bcb-a065-8ff916c55bb9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:41:24 crc kubenswrapper[4857]: I1128 13:41:24.793966 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa95429a-5622-4bcb-a065-8ff916c55bb9-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "fa95429a-5622-4bcb-a065-8ff916c55bb9" (UID: "fa95429a-5622-4bcb-a065-8ff916c55bb9"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:24 crc kubenswrapper[4857]: I1128 13:41:24.794019 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa95429a-5622-4bcb-a065-8ff916c55bb9-kube-api-access-2r7l4" (OuterVolumeSpecName: "kube-api-access-2r7l4") pod "fa95429a-5622-4bcb-a065-8ff916c55bb9" (UID: "fa95429a-5622-4bcb-a065-8ff916c55bb9"). InnerVolumeSpecName "kube-api-access-2r7l4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:41:24 crc kubenswrapper[4857]: I1128 13:41:24.815372 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa95429a-5622-4bcb-a065-8ff916c55bb9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fa95429a-5622-4bcb-a065-8ff916c55bb9" (UID: "fa95429a-5622-4bcb-a065-8ff916c55bb9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:24 crc kubenswrapper[4857]: I1128 13:41:24.847872 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa95429a-5622-4bcb-a065-8ff916c55bb9-config-data" (OuterVolumeSpecName: "config-data") pod "fa95429a-5622-4bcb-a065-8ff916c55bb9" (UID: "fa95429a-5622-4bcb-a065-8ff916c55bb9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:24 crc kubenswrapper[4857]: I1128 13:41:24.883419 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa95429a-5622-4bcb-a065-8ff916c55bb9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:24 crc kubenswrapper[4857]: I1128 13:41:24.883463 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fa95429a-5622-4bcb-a065-8ff916c55bb9-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:24 crc kubenswrapper[4857]: I1128 13:41:24.883508 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fa95429a-5622-4bcb-a065-8ff916c55bb9-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:24 crc kubenswrapper[4857]: I1128 13:41:24.883521 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa95429a-5622-4bcb-a065-8ff916c55bb9-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:24 crc kubenswrapper[4857]: I1128 13:41:24.883532 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2r7l4\" (UniqueName: \"kubernetes.io/projected/fa95429a-5622-4bcb-a065-8ff916c55bb9-kube-api-access-2r7l4\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:25 crc kubenswrapper[4857]: I1128 13:41:25.372961 4857 generic.go:334] "Generic (PLEG): container finished" podID="fa95429a-5622-4bcb-a065-8ff916c55bb9" containerID="4afcfea2a87b26b9b4261f7f29aa83d6eaf1b3dba1540e3bfbfdd796e9be994d" exitCode=0 Nov 28 13:41:25 crc kubenswrapper[4857]: I1128 13:41:25.373011 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5f5cc64f6b-5tvl6" event={"ID":"fa95429a-5622-4bcb-a065-8ff916c55bb9","Type":"ContainerDied","Data":"4afcfea2a87b26b9b4261f7f29aa83d6eaf1b3dba1540e3bfbfdd796e9be994d"} Nov 28 13:41:25 crc kubenswrapper[4857]: I1128 13:41:25.373026 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5f5cc64f6b-5tvl6" Nov 28 13:41:25 crc kubenswrapper[4857]: I1128 13:41:25.373046 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5f5cc64f6b-5tvl6" event={"ID":"fa95429a-5622-4bcb-a065-8ff916c55bb9","Type":"ContainerDied","Data":"e1aa5f085e6e2d5ae825822812e4278d0e2224c7030e07384c5bd34e402ad2b8"} Nov 28 13:41:25 crc kubenswrapper[4857]: I1128 13:41:25.373067 4857 scope.go:117] "RemoveContainer" containerID="4afcfea2a87b26b9b4261f7f29aa83d6eaf1b3dba1540e3bfbfdd796e9be994d" Nov 28 13:41:25 crc kubenswrapper[4857]: I1128 13:41:25.439824 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5f5cc64f6b-5tvl6"] Nov 28 13:41:25 crc kubenswrapper[4857]: I1128 13:41:25.439870 4857 scope.go:117] "RemoveContainer" containerID="596175288a06a7a15c708b2eee248bdbe809f62ad2e84d79070f3281ec4e96d0" Nov 28 13:41:25 crc kubenswrapper[4857]: I1128 13:41:25.447949 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-5f5cc64f6b-5tvl6"] Nov 28 13:41:25 crc kubenswrapper[4857]: I1128 13:41:25.467792 4857 scope.go:117] "RemoveContainer" containerID="4afcfea2a87b26b9b4261f7f29aa83d6eaf1b3dba1540e3bfbfdd796e9be994d" Nov 28 13:41:25 crc kubenswrapper[4857]: E1128 13:41:25.468539 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4afcfea2a87b26b9b4261f7f29aa83d6eaf1b3dba1540e3bfbfdd796e9be994d\": container with ID starting with 4afcfea2a87b26b9b4261f7f29aa83d6eaf1b3dba1540e3bfbfdd796e9be994d not found: ID does not exist" containerID="4afcfea2a87b26b9b4261f7f29aa83d6eaf1b3dba1540e3bfbfdd796e9be994d" Nov 28 13:41:25 crc kubenswrapper[4857]: I1128 13:41:25.468580 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4afcfea2a87b26b9b4261f7f29aa83d6eaf1b3dba1540e3bfbfdd796e9be994d"} err="failed to get container status \"4afcfea2a87b26b9b4261f7f29aa83d6eaf1b3dba1540e3bfbfdd796e9be994d\": rpc error: code = NotFound desc = could not find container \"4afcfea2a87b26b9b4261f7f29aa83d6eaf1b3dba1540e3bfbfdd796e9be994d\": container with ID starting with 4afcfea2a87b26b9b4261f7f29aa83d6eaf1b3dba1540e3bfbfdd796e9be994d not found: ID does not exist" Nov 28 13:41:25 crc kubenswrapper[4857]: I1128 13:41:25.468606 4857 scope.go:117] "RemoveContainer" containerID="596175288a06a7a15c708b2eee248bdbe809f62ad2e84d79070f3281ec4e96d0" Nov 28 13:41:25 crc kubenswrapper[4857]: E1128 13:41:25.468952 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"596175288a06a7a15c708b2eee248bdbe809f62ad2e84d79070f3281ec4e96d0\": container with ID starting with 596175288a06a7a15c708b2eee248bdbe809f62ad2e84d79070f3281ec4e96d0 not found: ID does not exist" containerID="596175288a06a7a15c708b2eee248bdbe809f62ad2e84d79070f3281ec4e96d0" Nov 28 13:41:25 crc kubenswrapper[4857]: I1128 13:41:25.468996 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"596175288a06a7a15c708b2eee248bdbe809f62ad2e84d79070f3281ec4e96d0"} err="failed to get container status \"596175288a06a7a15c708b2eee248bdbe809f62ad2e84d79070f3281ec4e96d0\": rpc error: code = NotFound desc = could not find container \"596175288a06a7a15c708b2eee248bdbe809f62ad2e84d79070f3281ec4e96d0\": container with ID starting with 596175288a06a7a15c708b2eee248bdbe809f62ad2e84d79070f3281ec4e96d0 not found: ID does not exist" Nov 28 
13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.073465 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-856655ccc5-9fgqc"] Nov 28 13:41:26 crc kubenswrapper[4857]: E1128 13:41:26.074130 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa95429a-5622-4bcb-a065-8ff916c55bb9" containerName="barbican-api" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.074169 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa95429a-5622-4bcb-a065-8ff916c55bb9" containerName="barbican-api" Nov 28 13:41:26 crc kubenswrapper[4857]: E1128 13:41:26.074186 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa95429a-5622-4bcb-a065-8ff916c55bb9" containerName="barbican-api-log" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.074193 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa95429a-5622-4bcb-a065-8ff916c55bb9" containerName="barbican-api-log" Nov 28 13:41:26 crc kubenswrapper[4857]: E1128 13:41:26.074224 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3fcb638a-dab8-414e-9d24-e49c8437672d" containerName="neutron-httpd" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.074230 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3fcb638a-dab8-414e-9d24-e49c8437672d" containerName="neutron-httpd" Nov 28 13:41:26 crc kubenswrapper[4857]: E1128 13:41:26.074238 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3fcb638a-dab8-414e-9d24-e49c8437672d" containerName="neutron-api" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.074245 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3fcb638a-dab8-414e-9d24-e49c8437672d" containerName="neutron-api" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.074412 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="3fcb638a-dab8-414e-9d24-e49c8437672d" containerName="neutron-httpd" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.074424 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa95429a-5622-4bcb-a065-8ff916c55bb9" containerName="barbican-api-log" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.074443 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="3fcb638a-dab8-414e-9d24-e49c8437672d" containerName="neutron-api" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.074449 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa95429a-5622-4bcb-a065-8ff916c55bb9" containerName="barbican-api" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.075445 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.077438 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.078799 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.093619 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-856655ccc5-9fgqc"] Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.093911 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.220785 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7358aa80-dbe4-4a31-ad84-9dc125491046-etc-swift\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.220868 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-combined-ca-bundle\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.220934 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7358aa80-dbe4-4a31-ad84-9dc125491046-log-httpd\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.220958 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-config-data\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.221003 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-public-tls-certs\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.221028 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hsxvf\" (UniqueName: \"kubernetes.io/projected/7358aa80-dbe4-4a31-ad84-9dc125491046-kube-api-access-hsxvf\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.221052 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7358aa80-dbe4-4a31-ad84-9dc125491046-run-httpd\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " 
pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.221071 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-internal-tls-certs\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.321051 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa95429a-5622-4bcb-a065-8ff916c55bb9" path="/var/lib/kubelet/pods/fa95429a-5622-4bcb-a065-8ff916c55bb9/volumes" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.322215 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7358aa80-dbe4-4a31-ad84-9dc125491046-etc-swift\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.322297 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-combined-ca-bundle\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.322331 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7358aa80-dbe4-4a31-ad84-9dc125491046-log-httpd\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.322352 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-config-data\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.322374 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-public-tls-certs\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.322402 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hsxvf\" (UniqueName: \"kubernetes.io/projected/7358aa80-dbe4-4a31-ad84-9dc125491046-kube-api-access-hsxvf\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.322427 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7358aa80-dbe4-4a31-ad84-9dc125491046-run-httpd\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.322445 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-internal-tls-certs\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.325606 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7358aa80-dbe4-4a31-ad84-9dc125491046-log-httpd\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.325697 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7358aa80-dbe4-4a31-ad84-9dc125491046-run-httpd\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.330452 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7358aa80-dbe4-4a31-ad84-9dc125491046-etc-swift\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.330964 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-internal-tls-certs\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.331009 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-combined-ca-bundle\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.336233 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-public-tls-certs\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.336714 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-config-data\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.344397 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hsxvf\" (UniqueName: \"kubernetes.io/projected/7358aa80-dbe4-4a31-ad84-9dc125491046-kube-api-access-hsxvf\") pod \"swift-proxy-856655ccc5-9fgqc\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.408483 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:26 crc kubenswrapper[4857]: I1128 13:41:26.998349 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-856655ccc5-9fgqc"] Nov 28 13:41:27 crc kubenswrapper[4857]: I1128 13:41:27.402010 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-856655ccc5-9fgqc" event={"ID":"7358aa80-dbe4-4a31-ad84-9dc125491046","Type":"ContainerStarted","Data":"921da2286c74b9205a4963fadea18299c07583052be029c357bcd68f1c378c4d"} Nov 28 13:41:27 crc kubenswrapper[4857]: I1128 13:41:27.402345 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-856655ccc5-9fgqc" event={"ID":"7358aa80-dbe4-4a31-ad84-9dc125491046","Type":"ContainerStarted","Data":"f1269091f3faf9b179a8e0748578833ca79484782be78f7a3c355b5f39a2f17a"} Nov 28 13:41:28 crc kubenswrapper[4857]: I1128 13:41:28.416885 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-856655ccc5-9fgqc" event={"ID":"7358aa80-dbe4-4a31-ad84-9dc125491046","Type":"ContainerStarted","Data":"7f1eabd058b1d022ba7f7cbccd8b90653ba66843bcbc94ab126462d61013e688"} Nov 28 13:41:28 crc kubenswrapper[4857]: I1128 13:41:28.417189 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:28 crc kubenswrapper[4857]: I1128 13:41:28.417210 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:28 crc kubenswrapper[4857]: I1128 13:41:28.439312 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-856655ccc5-9fgqc" podStartSLOduration=2.439297149 podStartE2EDuration="2.439297149s" podCreationTimestamp="2025-11-28 13:41:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:41:28.434792869 +0000 UTC m=+1380.462168046" watchObservedRunningTime="2025-11-28 13:41:28.439297149 +0000 UTC m=+1380.466672316" Nov 28 13:41:29 crc kubenswrapper[4857]: I1128 13:41:29.061248 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:41:29 crc kubenswrapper[4857]: I1128 13:41:29.061826 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="82de4c9c-25af-49e1-9579-f51ad9a3d4ec" containerName="ceilometer-central-agent" containerID="cri-o://efb20f4a4e327f203008825bf6990f8eb7c028b35f176a97f5ec6f6de2f09df8" gracePeriod=30 Nov 28 13:41:29 crc kubenswrapper[4857]: I1128 13:41:29.062488 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="82de4c9c-25af-49e1-9579-f51ad9a3d4ec" containerName="proxy-httpd" containerID="cri-o://42fdf039da249011bb4f98ee0ccc5c75823dcc18060eaa7491fb2c3d14d51398" gracePeriod=30 Nov 28 13:41:29 crc kubenswrapper[4857]: I1128 13:41:29.062550 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="82de4c9c-25af-49e1-9579-f51ad9a3d4ec" containerName="sg-core" containerID="cri-o://d1daa3e59875008df55b2c17a293c9828d76d615b9a0e170a5e1f7112130e884" gracePeriod=30 Nov 28 13:41:29 crc kubenswrapper[4857]: I1128 13:41:29.062583 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="82de4c9c-25af-49e1-9579-f51ad9a3d4ec" containerName="ceilometer-notification-agent" 
containerID="cri-o://393dc477a432d224c8853c01ad6939e533a259e97bad1e67a93a0e7f77720a1c" gracePeriod=30 Nov 28 13:41:29 crc kubenswrapper[4857]: I1128 13:41:29.077735 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="82de4c9c-25af-49e1-9579-f51ad9a3d4ec" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Nov 28 13:41:29 crc kubenswrapper[4857]: I1128 13:41:29.445036 4857 generic.go:334] "Generic (PLEG): container finished" podID="82de4c9c-25af-49e1-9579-f51ad9a3d4ec" containerID="42fdf039da249011bb4f98ee0ccc5c75823dcc18060eaa7491fb2c3d14d51398" exitCode=0 Nov 28 13:41:29 crc kubenswrapper[4857]: I1128 13:41:29.445071 4857 generic.go:334] "Generic (PLEG): container finished" podID="82de4c9c-25af-49e1-9579-f51ad9a3d4ec" containerID="d1daa3e59875008df55b2c17a293c9828d76d615b9a0e170a5e1f7112130e884" exitCode=2 Nov 28 13:41:29 crc kubenswrapper[4857]: I1128 13:41:29.445082 4857 generic.go:334] "Generic (PLEG): container finished" podID="82de4c9c-25af-49e1-9579-f51ad9a3d4ec" containerID="efb20f4a4e327f203008825bf6990f8eb7c028b35f176a97f5ec6f6de2f09df8" exitCode=0 Nov 28 13:41:29 crc kubenswrapper[4857]: I1128 13:41:29.446152 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82de4c9c-25af-49e1-9579-f51ad9a3d4ec","Type":"ContainerDied","Data":"42fdf039da249011bb4f98ee0ccc5c75823dcc18060eaa7491fb2c3d14d51398"} Nov 28 13:41:29 crc kubenswrapper[4857]: I1128 13:41:29.446193 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82de4c9c-25af-49e1-9579-f51ad9a3d4ec","Type":"ContainerDied","Data":"d1daa3e59875008df55b2c17a293c9828d76d615b9a0e170a5e1f7112130e884"} Nov 28 13:41:29 crc kubenswrapper[4857]: I1128 13:41:29.446204 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82de4c9c-25af-49e1-9579-f51ad9a3d4ec","Type":"ContainerDied","Data":"efb20f4a4e327f203008825bf6990f8eb7c028b35f176a97f5ec6f6de2f09df8"} Nov 28 13:41:29 crc kubenswrapper[4857]: E1128 13:41:29.465632 4857 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod82de4c9c_25af_49e1_9579_f51ad9a3d4ec.slice/crio-conmon-efb20f4a4e327f203008825bf6990f8eb7c028b35f176a97f5ec6f6de2f09df8.scope\": RecentStats: unable to find data in memory cache]" Nov 28 13:41:29 crc kubenswrapper[4857]: I1128 13:41:29.915421 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 28 13:41:33 crc kubenswrapper[4857]: I1128 13:41:33.483202 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"07621208-d831-4470-908c-76084c830753","Type":"ContainerStarted","Data":"f53ded071ea01c8120bc4be89662f272bd544a870cac413afe982b426f8f618a"} Nov 28 13:41:33 crc kubenswrapper[4857]: I1128 13:41:33.503122 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=1.6001692379999999 podStartE2EDuration="12.503105776s" podCreationTimestamp="2025-11-28 13:41:21 +0000 UTC" firstStartedPulling="2025-11-28 13:41:22.262772702 +0000 UTC m=+1374.290147869" lastFinishedPulling="2025-11-28 13:41:33.16570924 +0000 UTC m=+1385.193084407" observedRunningTime="2025-11-28 13:41:33.501206631 +0000 UTC m=+1385.528581798" watchObservedRunningTime="2025-11-28 13:41:33.503105776 +0000 UTC 
m=+1385.530480943" Nov 28 13:41:33 crc kubenswrapper[4857]: I1128 13:41:33.773986 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:41:33 crc kubenswrapper[4857]: I1128 13:41:33.874474 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4jfsr\" (UniqueName: \"kubernetes.io/projected/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-kube-api-access-4jfsr\") pod \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " Nov 28 13:41:33 crc kubenswrapper[4857]: I1128 13:41:33.874581 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-combined-ca-bundle\") pod \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " Nov 28 13:41:33 crc kubenswrapper[4857]: I1128 13:41:33.874638 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-run-httpd\") pod \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " Nov 28 13:41:33 crc kubenswrapper[4857]: I1128 13:41:33.875072 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "82de4c9c-25af-49e1-9579-f51ad9a3d4ec" (UID: "82de4c9c-25af-49e1-9579-f51ad9a3d4ec"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:41:33 crc kubenswrapper[4857]: I1128 13:41:33.875363 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-log-httpd\") pod \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " Nov 28 13:41:33 crc kubenswrapper[4857]: I1128 13:41:33.875600 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-sg-core-conf-yaml\") pod \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " Nov 28 13:41:33 crc kubenswrapper[4857]: I1128 13:41:33.875740 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-config-data\") pod \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " Nov 28 13:41:33 crc kubenswrapper[4857]: I1128 13:41:33.876164 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "82de4c9c-25af-49e1-9579-f51ad9a3d4ec" (UID: "82de4c9c-25af-49e1-9579-f51ad9a3d4ec"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:41:33 crc kubenswrapper[4857]: I1128 13:41:33.876199 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-scripts\") pod \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\" (UID: \"82de4c9c-25af-49e1-9579-f51ad9a3d4ec\") " Nov 28 13:41:33 crc kubenswrapper[4857]: I1128 13:41:33.877057 4857 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:33 crc kubenswrapper[4857]: I1128 13:41:33.877078 4857 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:33 crc kubenswrapper[4857]: I1128 13:41:33.880719 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-scripts" (OuterVolumeSpecName: "scripts") pod "82de4c9c-25af-49e1-9579-f51ad9a3d4ec" (UID: "82de4c9c-25af-49e1-9579-f51ad9a3d4ec"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:33 crc kubenswrapper[4857]: I1128 13:41:33.881185 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-kube-api-access-4jfsr" (OuterVolumeSpecName: "kube-api-access-4jfsr") pod "82de4c9c-25af-49e1-9579-f51ad9a3d4ec" (UID: "82de4c9c-25af-49e1-9579-f51ad9a3d4ec"). InnerVolumeSpecName "kube-api-access-4jfsr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:41:33 crc kubenswrapper[4857]: I1128 13:41:33.900781 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "82de4c9c-25af-49e1-9579-f51ad9a3d4ec" (UID: "82de4c9c-25af-49e1-9579-f51ad9a3d4ec"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:33 crc kubenswrapper[4857]: I1128 13:41:33.984259 4857 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:33 crc kubenswrapper[4857]: I1128 13:41:33.984326 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:33 crc kubenswrapper[4857]: I1128 13:41:33.984341 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4jfsr\" (UniqueName: \"kubernetes.io/projected/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-kube-api-access-4jfsr\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.026840 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "82de4c9c-25af-49e1-9579-f51ad9a3d4ec" (UID: "82de4c9c-25af-49e1-9579-f51ad9a3d4ec"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.034594 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-config-data" (OuterVolumeSpecName: "config-data") pod "82de4c9c-25af-49e1-9579-f51ad9a3d4ec" (UID: "82de4c9c-25af-49e1-9579-f51ad9a3d4ec"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.086487 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.086525 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82de4c9c-25af-49e1-9579-f51ad9a3d4ec-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.496180 4857 generic.go:334] "Generic (PLEG): container finished" podID="82de4c9c-25af-49e1-9579-f51ad9a3d4ec" containerID="393dc477a432d224c8853c01ad6939e533a259e97bad1e67a93a0e7f77720a1c" exitCode=0 Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.496226 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82de4c9c-25af-49e1-9579-f51ad9a3d4ec","Type":"ContainerDied","Data":"393dc477a432d224c8853c01ad6939e533a259e97bad1e67a93a0e7f77720a1c"} Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.496261 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.496283 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82de4c9c-25af-49e1-9579-f51ad9a3d4ec","Type":"ContainerDied","Data":"4d9a08ff646eb5bd2226dddbd4509c2e93604521bae5a149cbfc47949d76d6c6"} Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.496306 4857 scope.go:117] "RemoveContainer" containerID="42fdf039da249011bb4f98ee0ccc5c75823dcc18060eaa7491fb2c3d14d51398" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.533311 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.534410 4857 scope.go:117] "RemoveContainer" containerID="d1daa3e59875008df55b2c17a293c9828d76d615b9a0e170a5e1f7112130e884" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.543200 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.561208 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:41:34 crc kubenswrapper[4857]: E1128 13:41:34.561646 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82de4c9c-25af-49e1-9579-f51ad9a3d4ec" containerName="ceilometer-central-agent" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.561673 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="82de4c9c-25af-49e1-9579-f51ad9a3d4ec" containerName="ceilometer-central-agent" Nov 28 13:41:34 crc kubenswrapper[4857]: E1128 13:41:34.562033 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82de4c9c-25af-49e1-9579-f51ad9a3d4ec" containerName="proxy-httpd" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.562049 4857 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="82de4c9c-25af-49e1-9579-f51ad9a3d4ec" containerName="proxy-httpd" Nov 28 13:41:34 crc kubenswrapper[4857]: E1128 13:41:34.562065 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82de4c9c-25af-49e1-9579-f51ad9a3d4ec" containerName="ceilometer-notification-agent" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.562075 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="82de4c9c-25af-49e1-9579-f51ad9a3d4ec" containerName="ceilometer-notification-agent" Nov 28 13:41:34 crc kubenswrapper[4857]: E1128 13:41:34.562085 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82de4c9c-25af-49e1-9579-f51ad9a3d4ec" containerName="sg-core" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.562091 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="82de4c9c-25af-49e1-9579-f51ad9a3d4ec" containerName="sg-core" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.562298 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="82de4c9c-25af-49e1-9579-f51ad9a3d4ec" containerName="proxy-httpd" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.562317 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="82de4c9c-25af-49e1-9579-f51ad9a3d4ec" containerName="ceilometer-notification-agent" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.562335 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="82de4c9c-25af-49e1-9579-f51ad9a3d4ec" containerName="sg-core" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.562346 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="82de4c9c-25af-49e1-9579-f51ad9a3d4ec" containerName="ceilometer-central-agent" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.563672 4857 scope.go:117] "RemoveContainer" containerID="393dc477a432d224c8853c01ad6939e533a259e97bad1e67a93a0e7f77720a1c" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.563889 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.566367 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.566644 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.577634 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.605557 4857 scope.go:117] "RemoveContainer" containerID="efb20f4a4e327f203008825bf6990f8eb7c028b35f176a97f5ec6f6de2f09df8" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.627953 4857 scope.go:117] "RemoveContainer" containerID="42fdf039da249011bb4f98ee0ccc5c75823dcc18060eaa7491fb2c3d14d51398" Nov 28 13:41:34 crc kubenswrapper[4857]: E1128 13:41:34.628668 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"42fdf039da249011bb4f98ee0ccc5c75823dcc18060eaa7491fb2c3d14d51398\": container with ID starting with 42fdf039da249011bb4f98ee0ccc5c75823dcc18060eaa7491fb2c3d14d51398 not found: ID does not exist" containerID="42fdf039da249011bb4f98ee0ccc5c75823dcc18060eaa7491fb2c3d14d51398" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.628728 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"42fdf039da249011bb4f98ee0ccc5c75823dcc18060eaa7491fb2c3d14d51398"} err="failed to get container status \"42fdf039da249011bb4f98ee0ccc5c75823dcc18060eaa7491fb2c3d14d51398\": rpc error: code = NotFound desc = could not find container \"42fdf039da249011bb4f98ee0ccc5c75823dcc18060eaa7491fb2c3d14d51398\": container with ID starting with 42fdf039da249011bb4f98ee0ccc5c75823dcc18060eaa7491fb2c3d14d51398 not found: ID does not exist" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.628814 4857 scope.go:117] "RemoveContainer" containerID="d1daa3e59875008df55b2c17a293c9828d76d615b9a0e170a5e1f7112130e884" Nov 28 13:41:34 crc kubenswrapper[4857]: E1128 13:41:34.629325 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d1daa3e59875008df55b2c17a293c9828d76d615b9a0e170a5e1f7112130e884\": container with ID starting with d1daa3e59875008df55b2c17a293c9828d76d615b9a0e170a5e1f7112130e884 not found: ID does not exist" containerID="d1daa3e59875008df55b2c17a293c9828d76d615b9a0e170a5e1f7112130e884" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.629370 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1daa3e59875008df55b2c17a293c9828d76d615b9a0e170a5e1f7112130e884"} err="failed to get container status \"d1daa3e59875008df55b2c17a293c9828d76d615b9a0e170a5e1f7112130e884\": rpc error: code = NotFound desc = could not find container \"d1daa3e59875008df55b2c17a293c9828d76d615b9a0e170a5e1f7112130e884\": container with ID starting with d1daa3e59875008df55b2c17a293c9828d76d615b9a0e170a5e1f7112130e884 not found: ID does not exist" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.629401 4857 scope.go:117] "RemoveContainer" containerID="393dc477a432d224c8853c01ad6939e533a259e97bad1e67a93a0e7f77720a1c" Nov 28 13:41:34 crc kubenswrapper[4857]: E1128 13:41:34.629794 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"393dc477a432d224c8853c01ad6939e533a259e97bad1e67a93a0e7f77720a1c\": container with ID starting with 393dc477a432d224c8853c01ad6939e533a259e97bad1e67a93a0e7f77720a1c not found: ID does not exist" containerID="393dc477a432d224c8853c01ad6939e533a259e97bad1e67a93a0e7f77720a1c" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.629835 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"393dc477a432d224c8853c01ad6939e533a259e97bad1e67a93a0e7f77720a1c"} err="failed to get container status \"393dc477a432d224c8853c01ad6939e533a259e97bad1e67a93a0e7f77720a1c\": rpc error: code = NotFound desc = could not find container \"393dc477a432d224c8853c01ad6939e533a259e97bad1e67a93a0e7f77720a1c\": container with ID starting with 393dc477a432d224c8853c01ad6939e533a259e97bad1e67a93a0e7f77720a1c not found: ID does not exist" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.629865 4857 scope.go:117] "RemoveContainer" containerID="efb20f4a4e327f203008825bf6990f8eb7c028b35f176a97f5ec6f6de2f09df8" Nov 28 13:41:34 crc kubenswrapper[4857]: E1128 13:41:34.630201 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"efb20f4a4e327f203008825bf6990f8eb7c028b35f176a97f5ec6f6de2f09df8\": container with ID starting with efb20f4a4e327f203008825bf6990f8eb7c028b35f176a97f5ec6f6de2f09df8 not found: ID does not exist" containerID="efb20f4a4e327f203008825bf6990f8eb7c028b35f176a97f5ec6f6de2f09df8" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.630228 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"efb20f4a4e327f203008825bf6990f8eb7c028b35f176a97f5ec6f6de2f09df8"} err="failed to get container status \"efb20f4a4e327f203008825bf6990f8eb7c028b35f176a97f5ec6f6de2f09df8\": rpc error: code = NotFound desc = could not find container \"efb20f4a4e327f203008825bf6990f8eb7c028b35f176a97f5ec6f6de2f09df8\": container with ID starting with efb20f4a4e327f203008825bf6990f8eb7c028b35f176a97f5ec6f6de2f09df8 not found: ID does not exist" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.697193 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " pod="openstack/ceilometer-0" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.697257 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-scripts\") pod \"ceilometer-0\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " pod="openstack/ceilometer-0" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.697284 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-config-data\") pod \"ceilometer-0\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " pod="openstack/ceilometer-0" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.697319 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: 
\"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " pod="openstack/ceilometer-0" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.697351 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wjrwq\" (UniqueName: \"kubernetes.io/projected/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-kube-api-access-wjrwq\") pod \"ceilometer-0\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " pod="openstack/ceilometer-0" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.697411 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-run-httpd\") pod \"ceilometer-0\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " pod="openstack/ceilometer-0" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.697439 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-log-httpd\") pod \"ceilometer-0\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " pod="openstack/ceilometer-0" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.799109 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wjrwq\" (UniqueName: \"kubernetes.io/projected/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-kube-api-access-wjrwq\") pod \"ceilometer-0\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " pod="openstack/ceilometer-0" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.799419 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-run-httpd\") pod \"ceilometer-0\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " pod="openstack/ceilometer-0" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.799522 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-log-httpd\") pod \"ceilometer-0\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " pod="openstack/ceilometer-0" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.799674 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " pod="openstack/ceilometer-0" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.799778 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-scripts\") pod \"ceilometer-0\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " pod="openstack/ceilometer-0" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.799865 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-config-data\") pod \"ceilometer-0\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " pod="openstack/ceilometer-0" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.799965 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " pod="openstack/ceilometer-0" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.800159 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-run-httpd\") pod \"ceilometer-0\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " pod="openstack/ceilometer-0" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.800207 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-log-httpd\") pod \"ceilometer-0\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " pod="openstack/ceilometer-0" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.803592 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " pod="openstack/ceilometer-0" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.804699 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " pod="openstack/ceilometer-0" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.805228 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-scripts\") pod \"ceilometer-0\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " pod="openstack/ceilometer-0" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.815702 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wjrwq\" (UniqueName: \"kubernetes.io/projected/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-kube-api-access-wjrwq\") pod \"ceilometer-0\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " pod="openstack/ceilometer-0" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.815937 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-config-data\") pod \"ceilometer-0\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " pod="openstack/ceilometer-0" Nov 28 13:41:34 crc kubenswrapper[4857]: I1128 13:41:34.885182 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:41:35 crc kubenswrapper[4857]: I1128 13:41:35.343248 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:41:35 crc kubenswrapper[4857]: W1128 13:41:35.346550 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda7fc1f59_9d53_4da9_9eed_1c0d4ba10bf3.slice/crio-17e9ff36675d97ead2c2a4a69b1937a195122e05caf462a349aacfcee40ce0e2 WatchSource:0}: Error finding container 17e9ff36675d97ead2c2a4a69b1937a195122e05caf462a349aacfcee40ce0e2: Status 404 returned error can't find the container with id 17e9ff36675d97ead2c2a4a69b1937a195122e05caf462a349aacfcee40ce0e2 Nov 28 13:41:35 crc kubenswrapper[4857]: I1128 13:41:35.507549 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3","Type":"ContainerStarted","Data":"17e9ff36675d97ead2c2a4a69b1937a195122e05caf462a349aacfcee40ce0e2"} Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.320843 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82de4c9c-25af-49e1-9579-f51ad9a3d4ec" path="/var/lib/kubelet/pods/82de4c9c-25af-49e1-9579-f51ad9a3d4ec/volumes" Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.414653 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.418152 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.606766 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.607046 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa" containerName="glance-log" containerID="cri-o://df9b47ff8198bdafa7125b2e7a080b861355bfe30230d7f8a3ec52fcee86264f" gracePeriod=30 Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.607533 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa" containerName="glance-httpd" containerID="cri-o://e54d67a38fbee3a524172c908eaa8f9b96b59b1bd613edfba5494e6dc6f75b3e" gracePeriod=30 Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.700592 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-h28xd"] Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.701912 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-h28xd" Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.709652 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-h28xd"] Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.812290 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-57bwd"] Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.813672 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-57bwd" Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.859089 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1df1224d-12ff-4f52-bdbe-533b53f8991c-operator-scripts\") pod \"nova-api-db-create-h28xd\" (UID: \"1df1224d-12ff-4f52-bdbe-533b53f8991c\") " pod="openstack/nova-api-db-create-h28xd" Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.859168 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xw6m\" (UniqueName: \"kubernetes.io/projected/1df1224d-12ff-4f52-bdbe-533b53f8991c-kube-api-access-8xw6m\") pod \"nova-api-db-create-h28xd\" (UID: \"1df1224d-12ff-4f52-bdbe-533b53f8991c\") " pod="openstack/nova-api-db-create-h28xd" Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.900813 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-7cc9-account-create-update-fvpgx"] Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.902063 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-7cc9-account-create-update-fvpgx" Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.903137 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-57bwd"] Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.904995 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.913105 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-7cc9-account-create-update-fvpgx"] Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.961000 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52m2s\" (UniqueName: \"kubernetes.io/projected/205a6e10-4a06-4d87-b90f-5787e68be49d-kube-api-access-52m2s\") pod \"nova-cell0-db-create-57bwd\" (UID: \"205a6e10-4a06-4d87-b90f-5787e68be49d\") " pod="openstack/nova-cell0-db-create-57bwd" Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.961088 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/205a6e10-4a06-4d87-b90f-5787e68be49d-operator-scripts\") pod \"nova-cell0-db-create-57bwd\" (UID: \"205a6e10-4a06-4d87-b90f-5787e68be49d\") " pod="openstack/nova-cell0-db-create-57bwd" Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.961128 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8bf5691-6407-4038-8a23-3d562ec05262-operator-scripts\") pod \"nova-api-7cc9-account-create-update-fvpgx\" (UID: \"a8bf5691-6407-4038-8a23-3d562ec05262\") " pod="openstack/nova-api-7cc9-account-create-update-fvpgx" Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.961186 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1df1224d-12ff-4f52-bdbe-533b53f8991c-operator-scripts\") pod \"nova-api-db-create-h28xd\" (UID: \"1df1224d-12ff-4f52-bdbe-533b53f8991c\") " pod="openstack/nova-api-db-create-h28xd" Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.961232 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-zk7z5\" (UniqueName: \"kubernetes.io/projected/a8bf5691-6407-4038-8a23-3d562ec05262-kube-api-access-zk7z5\") pod \"nova-api-7cc9-account-create-update-fvpgx\" (UID: \"a8bf5691-6407-4038-8a23-3d562ec05262\") " pod="openstack/nova-api-7cc9-account-create-update-fvpgx" Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.961254 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xw6m\" (UniqueName: \"kubernetes.io/projected/1df1224d-12ff-4f52-bdbe-533b53f8991c-kube-api-access-8xw6m\") pod \"nova-api-db-create-h28xd\" (UID: \"1df1224d-12ff-4f52-bdbe-533b53f8991c\") " pod="openstack/nova-api-db-create-h28xd" Nov 28 13:41:36 crc kubenswrapper[4857]: I1128 13:41:36.962407 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1df1224d-12ff-4f52-bdbe-533b53f8991c-operator-scripts\") pod \"nova-api-db-create-h28xd\" (UID: \"1df1224d-12ff-4f52-bdbe-533b53f8991c\") " pod="openstack/nova-api-db-create-h28xd" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.009322 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xw6m\" (UniqueName: \"kubernetes.io/projected/1df1224d-12ff-4f52-bdbe-533b53f8991c-kube-api-access-8xw6m\") pod \"nova-api-db-create-h28xd\" (UID: \"1df1224d-12ff-4f52-bdbe-533b53f8991c\") " pod="openstack/nova-api-db-create-h28xd" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.012891 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-m8bx4"] Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.015977 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-m8bx4" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.029434 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-32fc-account-create-update-vvdvc"] Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.030458 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-32fc-account-create-update-vvdvc" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.032376 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.063959 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/205a6e10-4a06-4d87-b90f-5787e68be49d-operator-scripts\") pod \"nova-cell0-db-create-57bwd\" (UID: \"205a6e10-4a06-4d87-b90f-5787e68be49d\") " pod="openstack/nova-cell0-db-create-57bwd" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.064006 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8bf5691-6407-4038-8a23-3d562ec05262-operator-scripts\") pod \"nova-api-7cc9-account-create-update-fvpgx\" (UID: \"a8bf5691-6407-4038-8a23-3d562ec05262\") " pod="openstack/nova-api-7cc9-account-create-update-fvpgx" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.064082 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zk7z5\" (UniqueName: \"kubernetes.io/projected/a8bf5691-6407-4038-8a23-3d562ec05262-kube-api-access-zk7z5\") pod \"nova-api-7cc9-account-create-update-fvpgx\" (UID: \"a8bf5691-6407-4038-8a23-3d562ec05262\") " pod="openstack/nova-api-7cc9-account-create-update-fvpgx" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.064121 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52m2s\" (UniqueName: \"kubernetes.io/projected/205a6e10-4a06-4d87-b90f-5787e68be49d-kube-api-access-52m2s\") pod \"nova-cell0-db-create-57bwd\" (UID: \"205a6e10-4a06-4d87-b90f-5787e68be49d\") " pod="openstack/nova-cell0-db-create-57bwd" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.064918 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/205a6e10-4a06-4d87-b90f-5787e68be49d-operator-scripts\") pod \"nova-cell0-db-create-57bwd\" (UID: \"205a6e10-4a06-4d87-b90f-5787e68be49d\") " pod="openstack/nova-cell0-db-create-57bwd" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.065362 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8bf5691-6407-4038-8a23-3d562ec05262-operator-scripts\") pod \"nova-api-7cc9-account-create-update-fvpgx\" (UID: \"a8bf5691-6407-4038-8a23-3d562ec05262\") " pod="openstack/nova-api-7cc9-account-create-update-fvpgx" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.077723 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-h28xd" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.098351 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zk7z5\" (UniqueName: \"kubernetes.io/projected/a8bf5691-6407-4038-8a23-3d562ec05262-kube-api-access-zk7z5\") pod \"nova-api-7cc9-account-create-update-fvpgx\" (UID: \"a8bf5691-6407-4038-8a23-3d562ec05262\") " pod="openstack/nova-api-7cc9-account-create-update-fvpgx" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.101162 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-32fc-account-create-update-vvdvc"] Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.118893 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-m8bx4"] Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.120367 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52m2s\" (UniqueName: \"kubernetes.io/projected/205a6e10-4a06-4d87-b90f-5787e68be49d-kube-api-access-52m2s\") pod \"nova-cell0-db-create-57bwd\" (UID: \"205a6e10-4a06-4d87-b90f-5787e68be49d\") " pod="openstack/nova-cell0-db-create-57bwd" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.165545 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ff6d87b7-1400-461b-ab0c-e122e6e2a5e5-operator-scripts\") pod \"nova-cell1-db-create-m8bx4\" (UID: \"ff6d87b7-1400-461b-ab0c-e122e6e2a5e5\") " pod="openstack/nova-cell1-db-create-m8bx4" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.165596 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mndj\" (UniqueName: \"kubernetes.io/projected/ff6d87b7-1400-461b-ab0c-e122e6e2a5e5-kube-api-access-9mndj\") pod \"nova-cell1-db-create-m8bx4\" (UID: \"ff6d87b7-1400-461b-ab0c-e122e6e2a5e5\") " pod="openstack/nova-cell1-db-create-m8bx4" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.165941 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28bq9\" (UniqueName: \"kubernetes.io/projected/2b16d37a-3848-4af5-a224-1e50a611c2d7-kube-api-access-28bq9\") pod \"nova-cell0-32fc-account-create-update-vvdvc\" (UID: \"2b16d37a-3848-4af5-a224-1e50a611c2d7\") " pod="openstack/nova-cell0-32fc-account-create-update-vvdvc" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.166113 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b16d37a-3848-4af5-a224-1e50a611c2d7-operator-scripts\") pod \"nova-cell0-32fc-account-create-update-vvdvc\" (UID: \"2b16d37a-3848-4af5-a224-1e50a611c2d7\") " pod="openstack/nova-cell0-32fc-account-create-update-vvdvc" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.172552 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-57bwd" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.234108 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-7cc9-account-create-update-fvpgx" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.267775 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28bq9\" (UniqueName: \"kubernetes.io/projected/2b16d37a-3848-4af5-a224-1e50a611c2d7-kube-api-access-28bq9\") pod \"nova-cell0-32fc-account-create-update-vvdvc\" (UID: \"2b16d37a-3848-4af5-a224-1e50a611c2d7\") " pod="openstack/nova-cell0-32fc-account-create-update-vvdvc" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.267871 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b16d37a-3848-4af5-a224-1e50a611c2d7-operator-scripts\") pod \"nova-cell0-32fc-account-create-update-vvdvc\" (UID: \"2b16d37a-3848-4af5-a224-1e50a611c2d7\") " pod="openstack/nova-cell0-32fc-account-create-update-vvdvc" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.267909 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ff6d87b7-1400-461b-ab0c-e122e6e2a5e5-operator-scripts\") pod \"nova-cell1-db-create-m8bx4\" (UID: \"ff6d87b7-1400-461b-ab0c-e122e6e2a5e5\") " pod="openstack/nova-cell1-db-create-m8bx4" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.267929 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mndj\" (UniqueName: \"kubernetes.io/projected/ff6d87b7-1400-461b-ab0c-e122e6e2a5e5-kube-api-access-9mndj\") pod \"nova-cell1-db-create-m8bx4\" (UID: \"ff6d87b7-1400-461b-ab0c-e122e6e2a5e5\") " pod="openstack/nova-cell1-db-create-m8bx4" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.269021 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ff6d87b7-1400-461b-ab0c-e122e6e2a5e5-operator-scripts\") pod \"nova-cell1-db-create-m8bx4\" (UID: \"ff6d87b7-1400-461b-ab0c-e122e6e2a5e5\") " pod="openstack/nova-cell1-db-create-m8bx4" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.269828 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b16d37a-3848-4af5-a224-1e50a611c2d7-operator-scripts\") pod \"nova-cell0-32fc-account-create-update-vvdvc\" (UID: \"2b16d37a-3848-4af5-a224-1e50a611c2d7\") " pod="openstack/nova-cell0-32fc-account-create-update-vvdvc" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.295295 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mndj\" (UniqueName: \"kubernetes.io/projected/ff6d87b7-1400-461b-ab0c-e122e6e2a5e5-kube-api-access-9mndj\") pod \"nova-cell1-db-create-m8bx4\" (UID: \"ff6d87b7-1400-461b-ab0c-e122e6e2a5e5\") " pod="openstack/nova-cell1-db-create-m8bx4" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.306476 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28bq9\" (UniqueName: \"kubernetes.io/projected/2b16d37a-3848-4af5-a224-1e50a611c2d7-kube-api-access-28bq9\") pod \"nova-cell0-32fc-account-create-update-vvdvc\" (UID: \"2b16d37a-3848-4af5-a224-1e50a611c2d7\") " pod="openstack/nova-cell0-32fc-account-create-update-vvdvc" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.327113 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-0c56-account-create-update-6lpj4"] Nov 28 13:41:37 crc 
kubenswrapper[4857]: I1128 13:41:37.328193 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-0c56-account-create-update-6lpj4" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.333904 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.364948 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-0c56-account-create-update-6lpj4"] Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.365611 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-m8bx4" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.370614 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-32fc-account-create-update-vvdvc" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.470799 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7c62325d-5f8d-4477-9369-5a39a3a0bfc8-operator-scripts\") pod \"nova-cell1-0c56-account-create-update-6lpj4\" (UID: \"7c62325d-5f8d-4477-9369-5a39a3a0bfc8\") " pod="openstack/nova-cell1-0c56-account-create-update-6lpj4" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.471206 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-987xv\" (UniqueName: \"kubernetes.io/projected/7c62325d-5f8d-4477-9369-5a39a3a0bfc8-kube-api-access-987xv\") pod \"nova-cell1-0c56-account-create-update-6lpj4\" (UID: \"7c62325d-5f8d-4477-9369-5a39a3a0bfc8\") " pod="openstack/nova-cell1-0c56-account-create-update-6lpj4" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.544150 4857 generic.go:334] "Generic (PLEG): container finished" podID="87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa" containerID="df9b47ff8198bdafa7125b2e7a080b861355bfe30230d7f8a3ec52fcee86264f" exitCode=143 Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.544395 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa","Type":"ContainerDied","Data":"df9b47ff8198bdafa7125b2e7a080b861355bfe30230d7f8a3ec52fcee86264f"} Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.546559 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3","Type":"ContainerStarted","Data":"9d585ad6ed5c1ed2d4084a6c16dcddd38749967ad5d1ed0f89f762b91336348a"} Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.572787 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7c62325d-5f8d-4477-9369-5a39a3a0bfc8-operator-scripts\") pod \"nova-cell1-0c56-account-create-update-6lpj4\" (UID: \"7c62325d-5f8d-4477-9369-5a39a3a0bfc8\") " pod="openstack/nova-cell1-0c56-account-create-update-6lpj4" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.572857 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-987xv\" (UniqueName: \"kubernetes.io/projected/7c62325d-5f8d-4477-9369-5a39a3a0bfc8-kube-api-access-987xv\") pod \"nova-cell1-0c56-account-create-update-6lpj4\" (UID: \"7c62325d-5f8d-4477-9369-5a39a3a0bfc8\") " pod="openstack/nova-cell1-0c56-account-create-update-6lpj4" Nov 28 13:41:37 crc 
kubenswrapper[4857]: I1128 13:41:37.573563 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7c62325d-5f8d-4477-9369-5a39a3a0bfc8-operator-scripts\") pod \"nova-cell1-0c56-account-create-update-6lpj4\" (UID: \"7c62325d-5f8d-4477-9369-5a39a3a0bfc8\") " pod="openstack/nova-cell1-0c56-account-create-update-6lpj4" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.594394 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-987xv\" (UniqueName: \"kubernetes.io/projected/7c62325d-5f8d-4477-9369-5a39a3a0bfc8-kube-api-access-987xv\") pod \"nova-cell1-0c56-account-create-update-6lpj4\" (UID: \"7c62325d-5f8d-4477-9369-5a39a3a0bfc8\") " pod="openstack/nova-cell1-0c56-account-create-update-6lpj4" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.650440 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-h28xd"] Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.659033 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-0c56-account-create-update-6lpj4" Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.740073 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-57bwd"] Nov 28 13:41:37 crc kubenswrapper[4857]: I1128 13:41:37.922761 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-7cc9-account-create-update-fvpgx"] Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.066466 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-m8bx4"] Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.072620 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-32fc-account-create-update-vvdvc"] Nov 28 13:41:38 crc kubenswrapper[4857]: W1128 13:41:38.099610 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2b16d37a_3848_4af5_a224_1e50a611c2d7.slice/crio-0f1b6b97b8fa60ff21a8f870fc36e9488006b88d01e0161d613c2497d7ea30b8 WatchSource:0}: Error finding container 0f1b6b97b8fa60ff21a8f870fc36e9488006b88d01e0161d613c2497d7ea30b8: Status 404 returned error can't find the container with id 0f1b6b97b8fa60ff21a8f870fc36e9488006b88d01e0161d613c2497d7ea30b8 Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.208744 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.209192 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="aa5c6527-63ae-4b20-b497-8b7abe609110" containerName="glance-httpd" containerID="cri-o://c065e75bcff4982d3c580553c3075a92f37176a3153b6c5c8ecbfbaecc74b5c5" gracePeriod=30 Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.209531 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="aa5c6527-63ae-4b20-b497-8b7abe609110" containerName="glance-log" containerID="cri-o://2e90c406f3ce5f6e243a58b3a523f5bbbd5c50f0ea05fb2946f569018ff927c0" gracePeriod=30 Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.274121 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-0c56-account-create-update-6lpj4"] Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.560697 4857 generic.go:334] 
"Generic (PLEG): container finished" podID="aa5c6527-63ae-4b20-b497-8b7abe609110" containerID="2e90c406f3ce5f6e243a58b3a523f5bbbd5c50f0ea05fb2946f569018ff927c0" exitCode=143 Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.561009 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"aa5c6527-63ae-4b20-b497-8b7abe609110","Type":"ContainerDied","Data":"2e90c406f3ce5f6e243a58b3a523f5bbbd5c50f0ea05fb2946f569018ff927c0"} Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.562167 4857 generic.go:334] "Generic (PLEG): container finished" podID="1df1224d-12ff-4f52-bdbe-533b53f8991c" containerID="6f648c5f8eb830af4905c6385054384b6b5cee1dbbb4c91beefc91f8c133206b" exitCode=0 Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.562237 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-h28xd" event={"ID":"1df1224d-12ff-4f52-bdbe-533b53f8991c","Type":"ContainerDied","Data":"6f648c5f8eb830af4905c6385054384b6b5cee1dbbb4c91beefc91f8c133206b"} Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.562254 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-h28xd" event={"ID":"1df1224d-12ff-4f52-bdbe-533b53f8991c","Type":"ContainerStarted","Data":"8ff9608f12ccce9144a4783111b5c1817b6059daf804a44b612f587d12d5b59b"} Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.566242 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-32fc-account-create-update-vvdvc" event={"ID":"2b16d37a-3848-4af5-a224-1e50a611c2d7","Type":"ContainerStarted","Data":"afe50b0f6d97ab59e3e2e2925c1ad50286d76661f30419d74579b320e2624da7"} Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.566287 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-32fc-account-create-update-vvdvc" event={"ID":"2b16d37a-3848-4af5-a224-1e50a611c2d7","Type":"ContainerStarted","Data":"0f1b6b97b8fa60ff21a8f870fc36e9488006b88d01e0161d613c2497d7ea30b8"} Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.568262 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-m8bx4" event={"ID":"ff6d87b7-1400-461b-ab0c-e122e6e2a5e5","Type":"ContainerStarted","Data":"71ae9858cfed8aa14244777c5eeb1cd2345f201203894a017134e488b8f0b243"} Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.568328 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-m8bx4" event={"ID":"ff6d87b7-1400-461b-ab0c-e122e6e2a5e5","Type":"ContainerStarted","Data":"a4e62f71df43d167e5d1d73854c435880772e7f03b520c1ccfbd87f29c99ed30"} Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.569907 4857 generic.go:334] "Generic (PLEG): container finished" podID="a8bf5691-6407-4038-8a23-3d562ec05262" containerID="442c14282bda41626b88d6fa475b7be9e797bd11222890dc05a9fb4d318de55f" exitCode=0 Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.569947 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7cc9-account-create-update-fvpgx" event={"ID":"a8bf5691-6407-4038-8a23-3d562ec05262","Type":"ContainerDied","Data":"442c14282bda41626b88d6fa475b7be9e797bd11222890dc05a9fb4d318de55f"} Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.569969 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7cc9-account-create-update-fvpgx" 
event={"ID":"a8bf5691-6407-4038-8a23-3d562ec05262","Type":"ContainerStarted","Data":"e5fb5f8f51ecf0fcd5c21f5cb18a89f79502d9fad7566c4505013c3818f6e773"} Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.571283 4857 generic.go:334] "Generic (PLEG): container finished" podID="205a6e10-4a06-4d87-b90f-5787e68be49d" containerID="b9321f2b63f3869f3d5de215cccfae2beee129e35b1ddcfa2bb4212dbb778dac" exitCode=0 Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.571325 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-57bwd" event={"ID":"205a6e10-4a06-4d87-b90f-5787e68be49d","Type":"ContainerDied","Data":"b9321f2b63f3869f3d5de215cccfae2beee129e35b1ddcfa2bb4212dbb778dac"} Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.571339 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-57bwd" event={"ID":"205a6e10-4a06-4d87-b90f-5787e68be49d","Type":"ContainerStarted","Data":"2311c10af88d66e64a34f946c025eaf943110f167a9dc7b9549e87bf816921b9"} Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.577719 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3","Type":"ContainerStarted","Data":"31fb9037fd19e39c1ea46708082033162678704f4a0f4ad2fd303ff13e532e29"} Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.581835 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-0c56-account-create-update-6lpj4" event={"ID":"7c62325d-5f8d-4477-9369-5a39a3a0bfc8","Type":"ContainerStarted","Data":"88fa49c3f289e73d676dc73cf57ee9db5632204f69f9b2c03c7616194fb7346f"} Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.582008 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-0c56-account-create-update-6lpj4" event={"ID":"7c62325d-5f8d-4477-9369-5a39a3a0bfc8","Type":"ContainerStarted","Data":"cfae693de14c4a4b74069e67a842e64346b11ba40442eec93645850c8d22cc1f"} Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.593121 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-create-m8bx4" podStartSLOduration=2.593103097 podStartE2EDuration="2.593103097s" podCreationTimestamp="2025-11-28 13:41:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:41:38.587128365 +0000 UTC m=+1390.614503532" watchObservedRunningTime="2025-11-28 13:41:38.593103097 +0000 UTC m=+1390.620478264" Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.607678 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-32fc-account-create-update-vvdvc" podStartSLOduration=2.607661455 podStartE2EDuration="2.607661455s" podCreationTimestamp="2025-11-28 13:41:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:41:38.607213933 +0000 UTC m=+1390.634589090" watchObservedRunningTime="2025-11-28 13:41:38.607661455 +0000 UTC m=+1390.635036622" Nov 28 13:41:38 crc kubenswrapper[4857]: I1128 13:41:38.660261 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-0c56-account-create-update-6lpj4" podStartSLOduration=1.660245988 podStartE2EDuration="1.660245988s" podCreationTimestamp="2025-11-28 13:41:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 
+0000 UTC" observedRunningTime="2025-11-28 13:41:38.656384877 +0000 UTC m=+1390.683760054" watchObservedRunningTime="2025-11-28 13:41:38.660245988 +0000 UTC m=+1390.687621155" Nov 28 13:41:39 crc kubenswrapper[4857]: I1128 13:41:39.603308 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3","Type":"ContainerStarted","Data":"d79d9b2079c8d418c02ab7fa7ad5efa4707c05b7a47d13e0ad94309dd61e672a"} Nov 28 13:41:39 crc kubenswrapper[4857]: I1128 13:41:39.611895 4857 generic.go:334] "Generic (PLEG): container finished" podID="7c62325d-5f8d-4477-9369-5a39a3a0bfc8" containerID="88fa49c3f289e73d676dc73cf57ee9db5632204f69f9b2c03c7616194fb7346f" exitCode=0 Nov 28 13:41:39 crc kubenswrapper[4857]: I1128 13:41:39.611975 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-0c56-account-create-update-6lpj4" event={"ID":"7c62325d-5f8d-4477-9369-5a39a3a0bfc8","Type":"ContainerDied","Data":"88fa49c3f289e73d676dc73cf57ee9db5632204f69f9b2c03c7616194fb7346f"} Nov 28 13:41:39 crc kubenswrapper[4857]: I1128 13:41:39.616647 4857 generic.go:334] "Generic (PLEG): container finished" podID="2b16d37a-3848-4af5-a224-1e50a611c2d7" containerID="afe50b0f6d97ab59e3e2e2925c1ad50286d76661f30419d74579b320e2624da7" exitCode=0 Nov 28 13:41:39 crc kubenswrapper[4857]: I1128 13:41:39.616729 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-32fc-account-create-update-vvdvc" event={"ID":"2b16d37a-3848-4af5-a224-1e50a611c2d7","Type":"ContainerDied","Data":"afe50b0f6d97ab59e3e2e2925c1ad50286d76661f30419d74579b320e2624da7"} Nov 28 13:41:39 crc kubenswrapper[4857]: I1128 13:41:39.619133 4857 generic.go:334] "Generic (PLEG): container finished" podID="ff6d87b7-1400-461b-ab0c-e122e6e2a5e5" containerID="71ae9858cfed8aa14244777c5eeb1cd2345f201203894a017134e488b8f0b243" exitCode=0 Nov 28 13:41:39 crc kubenswrapper[4857]: I1128 13:41:39.619279 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-m8bx4" event={"ID":"ff6d87b7-1400-461b-ab0c-e122e6e2a5e5","Type":"ContainerDied","Data":"71ae9858cfed8aa14244777c5eeb1cd2345f201203894a017134e488b8f0b243"} Nov 28 13:41:39 crc kubenswrapper[4857]: I1128 13:41:39.885366 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.205208 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-h28xd" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.211059 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-7cc9-account-create-update-fvpgx" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.236900 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zk7z5\" (UniqueName: \"kubernetes.io/projected/a8bf5691-6407-4038-8a23-3d562ec05262-kube-api-access-zk7z5\") pod \"a8bf5691-6407-4038-8a23-3d562ec05262\" (UID: \"a8bf5691-6407-4038-8a23-3d562ec05262\") " Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.236977 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xw6m\" (UniqueName: \"kubernetes.io/projected/1df1224d-12ff-4f52-bdbe-533b53f8991c-kube-api-access-8xw6m\") pod \"1df1224d-12ff-4f52-bdbe-533b53f8991c\" (UID: \"1df1224d-12ff-4f52-bdbe-533b53f8991c\") " Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.237032 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1df1224d-12ff-4f52-bdbe-533b53f8991c-operator-scripts\") pod \"1df1224d-12ff-4f52-bdbe-533b53f8991c\" (UID: \"1df1224d-12ff-4f52-bdbe-533b53f8991c\") " Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.237051 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8bf5691-6407-4038-8a23-3d562ec05262-operator-scripts\") pod \"a8bf5691-6407-4038-8a23-3d562ec05262\" (UID: \"a8bf5691-6407-4038-8a23-3d562ec05262\") " Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.238111 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a8bf5691-6407-4038-8a23-3d562ec05262-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a8bf5691-6407-4038-8a23-3d562ec05262" (UID: "a8bf5691-6407-4038-8a23-3d562ec05262"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.240062 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1df1224d-12ff-4f52-bdbe-533b53f8991c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1df1224d-12ff-4f52-bdbe-533b53f8991c" (UID: "1df1224d-12ff-4f52-bdbe-533b53f8991c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.246408 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-57bwd" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.246514 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8bf5691-6407-4038-8a23-3d562ec05262-kube-api-access-zk7z5" (OuterVolumeSpecName: "kube-api-access-zk7z5") pod "a8bf5691-6407-4038-8a23-3d562ec05262" (UID: "a8bf5691-6407-4038-8a23-3d562ec05262"). InnerVolumeSpecName "kube-api-access-zk7z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.247975 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1df1224d-12ff-4f52-bdbe-533b53f8991c-kube-api-access-8xw6m" (OuterVolumeSpecName: "kube-api-access-8xw6m") pod "1df1224d-12ff-4f52-bdbe-533b53f8991c" (UID: "1df1224d-12ff-4f52-bdbe-533b53f8991c"). InnerVolumeSpecName "kube-api-access-8xw6m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.338892 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/205a6e10-4a06-4d87-b90f-5787e68be49d-operator-scripts\") pod \"205a6e10-4a06-4d87-b90f-5787e68be49d\" (UID: \"205a6e10-4a06-4d87-b90f-5787e68be49d\") " Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.339092 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-52m2s\" (UniqueName: \"kubernetes.io/projected/205a6e10-4a06-4d87-b90f-5787e68be49d-kube-api-access-52m2s\") pod \"205a6e10-4a06-4d87-b90f-5787e68be49d\" (UID: \"205a6e10-4a06-4d87-b90f-5787e68be49d\") " Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.339667 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zk7z5\" (UniqueName: \"kubernetes.io/projected/a8bf5691-6407-4038-8a23-3d562ec05262-kube-api-access-zk7z5\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.339683 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8xw6m\" (UniqueName: \"kubernetes.io/projected/1df1224d-12ff-4f52-bdbe-533b53f8991c-kube-api-access-8xw6m\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.339693 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1df1224d-12ff-4f52-bdbe-533b53f8991c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.339703 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8bf5691-6407-4038-8a23-3d562ec05262-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.340424 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/205a6e10-4a06-4d87-b90f-5787e68be49d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "205a6e10-4a06-4d87-b90f-5787e68be49d" (UID: "205a6e10-4a06-4d87-b90f-5787e68be49d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.347490 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/205a6e10-4a06-4d87-b90f-5787e68be49d-kube-api-access-52m2s" (OuterVolumeSpecName: "kube-api-access-52m2s") pod "205a6e10-4a06-4d87-b90f-5787e68be49d" (UID: "205a6e10-4a06-4d87-b90f-5787e68be49d"). InnerVolumeSpecName "kube-api-access-52m2s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.353080 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.440405 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.440470 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-public-tls-certs\") pod \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.440582 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-logs\") pod \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.440634 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-config-data\") pod \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.440672 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fzptm\" (UniqueName: \"kubernetes.io/projected/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-kube-api-access-fzptm\") pod \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.440737 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-scripts\") pod \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.440802 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-httpd-run\") pod \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.440886 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-combined-ca-bundle\") pod \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\" (UID: \"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa\") " Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.441333 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-logs" (OuterVolumeSpecName: "logs") pod "87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa" (UID: "87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.441385 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-52m2s\" (UniqueName: \"kubernetes.io/projected/205a6e10-4a06-4d87-b90f-5787e68be49d-kube-api-access-52m2s\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.441404 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/205a6e10-4a06-4d87-b90f-5787e68be49d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.444979 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa" (UID: "87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.448182 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa" (UID: "87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.448311 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-scripts" (OuterVolumeSpecName: "scripts") pod "87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa" (UID: "87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.449903 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-kube-api-access-fzptm" (OuterVolumeSpecName: "kube-api-access-fzptm") pod "87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa" (UID: "87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa"). InnerVolumeSpecName "kube-api-access-fzptm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.489035 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa" (UID: "87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.513979 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-config-data" (OuterVolumeSpecName: "config-data") pod "87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa" (UID: "87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.533870 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa" (UID: "87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa"). 
InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.543231 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.543262 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fzptm\" (UniqueName: \"kubernetes.io/projected/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-kube-api-access-fzptm\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.543271 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.543278 4857 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.543286 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.543313 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.543323 4857 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.543332 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.573691 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.632006 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-h28xd" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.631996 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-h28xd" event={"ID":"1df1224d-12ff-4f52-bdbe-533b53f8991c","Type":"ContainerDied","Data":"8ff9608f12ccce9144a4783111b5c1817b6059daf804a44b612f587d12d5b59b"} Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.632134 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ff9608f12ccce9144a4783111b5c1817b6059daf804a44b612f587d12d5b59b" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.636100 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3","Type":"ContainerStarted","Data":"9989c73319857c1dfbd53314ea5c9c604b563ab7ff0b98abc71effd88840aea9"} Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.636260 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" containerName="ceilometer-central-agent" containerID="cri-o://9d585ad6ed5c1ed2d4084a6c16dcddd38749967ad5d1ed0f89f762b91336348a" gracePeriod=30 Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.636336 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" containerName="proxy-httpd" containerID="cri-o://9989c73319857c1dfbd53314ea5c9c604b563ab7ff0b98abc71effd88840aea9" gracePeriod=30 Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.636381 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" containerName="sg-core" containerID="cri-o://d79d9b2079c8d418c02ab7fa7ad5efa4707c05b7a47d13e0ad94309dd61e672a" gracePeriod=30 Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.636424 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" containerName="ceilometer-notification-agent" containerID="cri-o://31fb9037fd19e39c1ea46708082033162678704f4a0f4ad2fd303ff13e532e29" gracePeriod=30 Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.636548 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.640517 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7cc9-account-create-update-fvpgx" event={"ID":"a8bf5691-6407-4038-8a23-3d562ec05262","Type":"ContainerDied","Data":"e5fb5f8f51ecf0fcd5c21f5cb18a89f79502d9fad7566c4505013c3818f6e773"} Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.640562 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e5fb5f8f51ecf0fcd5c21f5cb18a89f79502d9fad7566c4505013c3818f6e773" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.640660 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-7cc9-account-create-update-fvpgx" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.644698 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.644914 4857 generic.go:334] "Generic (PLEG): container finished" podID="87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa" containerID="e54d67a38fbee3a524172c908eaa8f9b96b59b1bd613edfba5494e6dc6f75b3e" exitCode=0 Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.645037 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.645123 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa","Type":"ContainerDied","Data":"e54d67a38fbee3a524172c908eaa8f9b96b59b1bd613edfba5494e6dc6f75b3e"} Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.645171 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa","Type":"ContainerDied","Data":"90a1d135eac8123f28e076e4f4eb9271cae9df227a1da250c9a71664dd80a384"} Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.645214 4857 scope.go:117] "RemoveContainer" containerID="e54d67a38fbee3a524172c908eaa8f9b96b59b1bd613edfba5494e6dc6f75b3e" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.647565 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-57bwd" event={"ID":"205a6e10-4a06-4d87-b90f-5787e68be49d","Type":"ContainerDied","Data":"2311c10af88d66e64a34f946c025eaf943110f167a9dc7b9549e87bf816921b9"} Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.647603 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2311c10af88d66e64a34f946c025eaf943110f167a9dc7b9549e87bf816921b9" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.647681 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-57bwd" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.681133 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.904070533 podStartE2EDuration="6.681113031s" podCreationTimestamp="2025-11-28 13:41:34 +0000 UTC" firstStartedPulling="2025-11-28 13:41:35.348604044 +0000 UTC m=+1387.375979211" lastFinishedPulling="2025-11-28 13:41:40.125646542 +0000 UTC m=+1392.153021709" observedRunningTime="2025-11-28 13:41:40.668444956 +0000 UTC m=+1392.695820123" watchObservedRunningTime="2025-11-28 13:41:40.681113031 +0000 UTC m=+1392.708488198" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.693404 4857 scope.go:117] "RemoveContainer" containerID="df9b47ff8198bdafa7125b2e7a080b861355bfe30230d7f8a3ec52fcee86264f" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.729569 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.772193 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.791030 4857 scope.go:117] "RemoveContainer" containerID="e54d67a38fbee3a524172c908eaa8f9b96b59b1bd613edfba5494e6dc6f75b3e" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.791148 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:41:40 crc kubenswrapper[4857]: E1128 13:41:40.791601 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1df1224d-12ff-4f52-bdbe-533b53f8991c" containerName="mariadb-database-create" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.791626 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="1df1224d-12ff-4f52-bdbe-533b53f8991c" containerName="mariadb-database-create" Nov 28 13:41:40 crc kubenswrapper[4857]: E1128 13:41:40.791648 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa" containerName="glance-log" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.791657 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa" containerName="glance-log" Nov 28 13:41:40 crc kubenswrapper[4857]: E1128 13:41:40.791670 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="205a6e10-4a06-4d87-b90f-5787e68be49d" containerName="mariadb-database-create" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.791678 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="205a6e10-4a06-4d87-b90f-5787e68be49d" containerName="mariadb-database-create" Nov 28 13:41:40 crc kubenswrapper[4857]: E1128 13:41:40.791700 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8bf5691-6407-4038-8a23-3d562ec05262" containerName="mariadb-account-create-update" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.791708 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8bf5691-6407-4038-8a23-3d562ec05262" containerName="mariadb-account-create-update" Nov 28 13:41:40 crc kubenswrapper[4857]: E1128 13:41:40.791727 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa" containerName="glance-httpd" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.791735 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa" 
containerName="glance-httpd" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.792301 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="1df1224d-12ff-4f52-bdbe-533b53f8991c" containerName="mariadb-database-create" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.792333 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa" containerName="glance-httpd" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.792351 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa" containerName="glance-log" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.792359 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8bf5691-6407-4038-8a23-3d562ec05262" containerName="mariadb-account-create-update" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.792375 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="205a6e10-4a06-4d87-b90f-5787e68be49d" containerName="mariadb-database-create" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.793578 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: E1128 13:41:40.794548 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e54d67a38fbee3a524172c908eaa8f9b96b59b1bd613edfba5494e6dc6f75b3e\": container with ID starting with e54d67a38fbee3a524172c908eaa8f9b96b59b1bd613edfba5494e6dc6f75b3e not found: ID does not exist" containerID="e54d67a38fbee3a524172c908eaa8f9b96b59b1bd613edfba5494e6dc6f75b3e" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.794582 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e54d67a38fbee3a524172c908eaa8f9b96b59b1bd613edfba5494e6dc6f75b3e"} err="failed to get container status \"e54d67a38fbee3a524172c908eaa8f9b96b59b1bd613edfba5494e6dc6f75b3e\": rpc error: code = NotFound desc = could not find container \"e54d67a38fbee3a524172c908eaa8f9b96b59b1bd613edfba5494e6dc6f75b3e\": container with ID starting with e54d67a38fbee3a524172c908eaa8f9b96b59b1bd613edfba5494e6dc6f75b3e not found: ID does not exist" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.794606 4857 scope.go:117] "RemoveContainer" containerID="df9b47ff8198bdafa7125b2e7a080b861355bfe30230d7f8a3ec52fcee86264f" Nov 28 13:41:40 crc kubenswrapper[4857]: E1128 13:41:40.803345 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df9b47ff8198bdafa7125b2e7a080b861355bfe30230d7f8a3ec52fcee86264f\": container with ID starting with df9b47ff8198bdafa7125b2e7a080b861355bfe30230d7f8a3ec52fcee86264f not found: ID does not exist" containerID="df9b47ff8198bdafa7125b2e7a080b861355bfe30230d7f8a3ec52fcee86264f" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.803392 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df9b47ff8198bdafa7125b2e7a080b861355bfe30230d7f8a3ec52fcee86264f"} err="failed to get container status \"df9b47ff8198bdafa7125b2e7a080b861355bfe30230d7f8a3ec52fcee86264f\": rpc error: code = NotFound desc = could not find container \"df9b47ff8198bdafa7125b2e7a080b861355bfe30230d7f8a3ec52fcee86264f\": container with ID starting with df9b47ff8198bdafa7125b2e7a080b861355bfe30230d7f8a3ec52fcee86264f not found: ID does not exist" Nov 28 
13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.807966 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.811540 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.817893 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.848062 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.848116 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-scripts\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.848140 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7bee7127-9367-4882-8ab1-0493128d2641-logs\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.848168 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7bjq\" (UniqueName: \"kubernetes.io/projected/7bee7127-9367-4882-8ab1-0493128d2641-kube-api-access-g7bjq\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.848210 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-config-data\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.848249 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.848290 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.848333 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/7bee7127-9367-4882-8ab1-0493128d2641-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.950162 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-scripts\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.950201 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7bee7127-9367-4882-8ab1-0493128d2641-logs\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.950248 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7bjq\" (UniqueName: \"kubernetes.io/projected/7bee7127-9367-4882-8ab1-0493128d2641-kube-api-access-g7bjq\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.950286 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-config-data\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.950347 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.950434 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.950518 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7bee7127-9367-4882-8ab1-0493128d2641-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.950677 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.952140 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: 
\"7bee7127-9367-4882-8ab1-0493128d2641\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.952173 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7bee7127-9367-4882-8ab1-0493128d2641-logs\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.952599 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7bee7127-9367-4882-8ab1-0493128d2641-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.958337 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-scripts\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.959448 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-config-data\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.960371 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.972366 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:40 crc kubenswrapper[4857]: I1128 13:41:40.986472 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7bjq\" (UniqueName: \"kubernetes.io/projected/7bee7127-9367-4882-8ab1-0493128d2641-kube-api-access-g7bjq\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.023788 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " pod="openstack/glance-default-external-api-0" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.147956 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-0c56-account-create-update-6lpj4" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.150084 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.223630 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-m8bx4" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.249187 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-32fc-account-create-update-vvdvc" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.258098 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7c62325d-5f8d-4477-9369-5a39a3a0bfc8-operator-scripts\") pod \"7c62325d-5f8d-4477-9369-5a39a3a0bfc8\" (UID: \"7c62325d-5f8d-4477-9369-5a39a3a0bfc8\") " Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.258151 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-987xv\" (UniqueName: \"kubernetes.io/projected/7c62325d-5f8d-4477-9369-5a39a3a0bfc8-kube-api-access-987xv\") pod \"7c62325d-5f8d-4477-9369-5a39a3a0bfc8\" (UID: \"7c62325d-5f8d-4477-9369-5a39a3a0bfc8\") " Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.258380 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-28bq9\" (UniqueName: \"kubernetes.io/projected/2b16d37a-3848-4af5-a224-1e50a611c2d7-kube-api-access-28bq9\") pod \"2b16d37a-3848-4af5-a224-1e50a611c2d7\" (UID: \"2b16d37a-3848-4af5-a224-1e50a611c2d7\") " Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.258405 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9mndj\" (UniqueName: \"kubernetes.io/projected/ff6d87b7-1400-461b-ab0c-e122e6e2a5e5-kube-api-access-9mndj\") pod \"ff6d87b7-1400-461b-ab0c-e122e6e2a5e5\" (UID: \"ff6d87b7-1400-461b-ab0c-e122e6e2a5e5\") " Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.259877 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c62325d-5f8d-4477-9369-5a39a3a0bfc8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7c62325d-5f8d-4477-9369-5a39a3a0bfc8" (UID: "7c62325d-5f8d-4477-9369-5a39a3a0bfc8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.270525 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b16d37a-3848-4af5-a224-1e50a611c2d7-kube-api-access-28bq9" (OuterVolumeSpecName: "kube-api-access-28bq9") pod "2b16d37a-3848-4af5-a224-1e50a611c2d7" (UID: "2b16d37a-3848-4af5-a224-1e50a611c2d7"). InnerVolumeSpecName "kube-api-access-28bq9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.297812 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c62325d-5f8d-4477-9369-5a39a3a0bfc8-kube-api-access-987xv" (OuterVolumeSpecName: "kube-api-access-987xv") pod "7c62325d-5f8d-4477-9369-5a39a3a0bfc8" (UID: "7c62325d-5f8d-4477-9369-5a39a3a0bfc8"). InnerVolumeSpecName "kube-api-access-987xv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.298189 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff6d87b7-1400-461b-ab0c-e122e6e2a5e5-kube-api-access-9mndj" (OuterVolumeSpecName: "kube-api-access-9mndj") pod "ff6d87b7-1400-461b-ab0c-e122e6e2a5e5" (UID: "ff6d87b7-1400-461b-ab0c-e122e6e2a5e5"). InnerVolumeSpecName "kube-api-access-9mndj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.362086 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b16d37a-3848-4af5-a224-1e50a611c2d7-operator-scripts\") pod \"2b16d37a-3848-4af5-a224-1e50a611c2d7\" (UID: \"2b16d37a-3848-4af5-a224-1e50a611c2d7\") " Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.362271 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ff6d87b7-1400-461b-ab0c-e122e6e2a5e5-operator-scripts\") pod \"ff6d87b7-1400-461b-ab0c-e122e6e2a5e5\" (UID: \"ff6d87b7-1400-461b-ab0c-e122e6e2a5e5\") " Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.362711 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7c62325d-5f8d-4477-9369-5a39a3a0bfc8-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.362732 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-987xv\" (UniqueName: \"kubernetes.io/projected/7c62325d-5f8d-4477-9369-5a39a3a0bfc8-kube-api-access-987xv\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.362743 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-28bq9\" (UniqueName: \"kubernetes.io/projected/2b16d37a-3848-4af5-a224-1e50a611c2d7-kube-api-access-28bq9\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.363480 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9mndj\" (UniqueName: \"kubernetes.io/projected/ff6d87b7-1400-461b-ab0c-e122e6e2a5e5-kube-api-access-9mndj\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.363248 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b16d37a-3848-4af5-a224-1e50a611c2d7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2b16d37a-3848-4af5-a224-1e50a611c2d7" (UID: "2b16d37a-3848-4af5-a224-1e50a611c2d7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.363851 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff6d87b7-1400-461b-ab0c-e122e6e2a5e5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ff6d87b7-1400-461b-ab0c-e122e6e2a5e5" (UID: "ff6d87b7-1400-461b-ab0c-e122e6e2a5e5"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.464856 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b16d37a-3848-4af5-a224-1e50a611c2d7-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.464881 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ff6d87b7-1400-461b-ab0c-e122e6e2a5e5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.673939 4857 generic.go:334] "Generic (PLEG): container finished" podID="a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" containerID="9989c73319857c1dfbd53314ea5c9c604b563ab7ff0b98abc71effd88840aea9" exitCode=0 Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.674265 4857 generic.go:334] "Generic (PLEG): container finished" podID="a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" containerID="d79d9b2079c8d418c02ab7fa7ad5efa4707c05b7a47d13e0ad94309dd61e672a" exitCode=2 Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.674278 4857 generic.go:334] "Generic (PLEG): container finished" podID="a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" containerID="31fb9037fd19e39c1ea46708082033162678704f4a0f4ad2fd303ff13e532e29" exitCode=0 Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.674001 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3","Type":"ContainerDied","Data":"9989c73319857c1dfbd53314ea5c9c604b563ab7ff0b98abc71effd88840aea9"} Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.674349 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3","Type":"ContainerDied","Data":"d79d9b2079c8d418c02ab7fa7ad5efa4707c05b7a47d13e0ad94309dd61e672a"} Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.674374 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3","Type":"ContainerDied","Data":"31fb9037fd19e39c1ea46708082033162678704f4a0f4ad2fd303ff13e532e29"} Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.677230 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-0c56-account-create-update-6lpj4" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.677231 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-0c56-account-create-update-6lpj4" event={"ID":"7c62325d-5f8d-4477-9369-5a39a3a0bfc8","Type":"ContainerDied","Data":"cfae693de14c4a4b74069e67a842e64346b11ba40442eec93645850c8d22cc1f"} Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.677658 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cfae693de14c4a4b74069e67a842e64346b11ba40442eec93645850c8d22cc1f" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.679478 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-32fc-account-create-update-vvdvc" event={"ID":"2b16d37a-3848-4af5-a224-1e50a611c2d7","Type":"ContainerDied","Data":"0f1b6b97b8fa60ff21a8f870fc36e9488006b88d01e0161d613c2497d7ea30b8"} Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.679504 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0f1b6b97b8fa60ff21a8f870fc36e9488006b88d01e0161d613c2497d7ea30b8" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.679506 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-32fc-account-create-update-vvdvc" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.693428 4857 generic.go:334] "Generic (PLEG): container finished" podID="aa5c6527-63ae-4b20-b497-8b7abe609110" containerID="c065e75bcff4982d3c580553c3075a92f37176a3153b6c5c8ecbfbaecc74b5c5" exitCode=0 Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.693499 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"aa5c6527-63ae-4b20-b497-8b7abe609110","Type":"ContainerDied","Data":"c065e75bcff4982d3c580553c3075a92f37176a3153b6c5c8ecbfbaecc74b5c5"} Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.696225 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-m8bx4" event={"ID":"ff6d87b7-1400-461b-ab0c-e122e6e2a5e5","Type":"ContainerDied","Data":"a4e62f71df43d167e5d1d73854c435880772e7f03b520c1ccfbd87f29c99ed30"} Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.696256 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a4e62f71df43d167e5d1d73854c435880772e7f03b520c1ccfbd87f29c99ed30" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.696313 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-m8bx4" Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.784661 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:41:41 crc kubenswrapper[4857]: W1128 13:41:41.809668 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7bee7127_9367_4882_8ab1_0493128d2641.slice/crio-b412b43cd790cbb5062aff33c4ac84ffd760e41e2d1023e30df25e13845e09dc WatchSource:0}: Error finding container b412b43cd790cbb5062aff33c4ac84ffd760e41e2d1023e30df25e13845e09dc: Status 404 returned error can't find the container with id b412b43cd790cbb5062aff33c4ac84ffd760e41e2d1023e30df25e13845e09dc Nov 28 13:41:41 crc kubenswrapper[4857]: I1128 13:41:41.937858 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.078324 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-scripts\") pod \"aa5c6527-63ae-4b20-b497-8b7abe609110\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.078410 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/aa5c6527-63ae-4b20-b497-8b7abe609110-httpd-run\") pod \"aa5c6527-63ae-4b20-b497-8b7abe609110\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.078477 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aa5c6527-63ae-4b20-b497-8b7abe609110-logs\") pod \"aa5c6527-63ae-4b20-b497-8b7abe609110\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.078507 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"aa5c6527-63ae-4b20-b497-8b7abe609110\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.078541 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-internal-tls-certs\") pod \"aa5c6527-63ae-4b20-b497-8b7abe609110\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.078574 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-config-data\") pod \"aa5c6527-63ae-4b20-b497-8b7abe609110\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.078642 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-combined-ca-bundle\") pod \"aa5c6527-63ae-4b20-b497-8b7abe609110\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.078679 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmdmm\" (UniqueName: \"kubernetes.io/projected/aa5c6527-63ae-4b20-b497-8b7abe609110-kube-api-access-mmdmm\") pod \"aa5c6527-63ae-4b20-b497-8b7abe609110\" (UID: \"aa5c6527-63ae-4b20-b497-8b7abe609110\") " Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.080195 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aa5c6527-63ae-4b20-b497-8b7abe609110-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "aa5c6527-63ae-4b20-b497-8b7abe609110" (UID: "aa5c6527-63ae-4b20-b497-8b7abe609110"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.080547 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aa5c6527-63ae-4b20-b497-8b7abe609110-logs" (OuterVolumeSpecName: "logs") pod "aa5c6527-63ae-4b20-b497-8b7abe609110" (UID: "aa5c6527-63ae-4b20-b497-8b7abe609110"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.084872 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "glance") pod "aa5c6527-63ae-4b20-b497-8b7abe609110" (UID: "aa5c6527-63ae-4b20-b497-8b7abe609110"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.086899 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa5c6527-63ae-4b20-b497-8b7abe609110-kube-api-access-mmdmm" (OuterVolumeSpecName: "kube-api-access-mmdmm") pod "aa5c6527-63ae-4b20-b497-8b7abe609110" (UID: "aa5c6527-63ae-4b20-b497-8b7abe609110"). InnerVolumeSpecName "kube-api-access-mmdmm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.087943 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-scripts" (OuterVolumeSpecName: "scripts") pod "aa5c6527-63ae-4b20-b497-8b7abe609110" (UID: "aa5c6527-63ae-4b20-b497-8b7abe609110"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.119105 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "aa5c6527-63ae-4b20-b497-8b7abe609110" (UID: "aa5c6527-63ae-4b20-b497-8b7abe609110"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.148180 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "aa5c6527-63ae-4b20-b497-8b7abe609110" (UID: "aa5c6527-63ae-4b20-b497-8b7abe609110"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.157133 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-config-data" (OuterVolumeSpecName: "config-data") pod "aa5c6527-63ae-4b20-b497-8b7abe609110" (UID: "aa5c6527-63ae-4b20-b497-8b7abe609110"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.180796 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.180827 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mmdmm\" (UniqueName: \"kubernetes.io/projected/aa5c6527-63ae-4b20-b497-8b7abe609110-kube-api-access-mmdmm\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.180840 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.180848 4857 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/aa5c6527-63ae-4b20-b497-8b7abe609110-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.180857 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aa5c6527-63ae-4b20-b497-8b7abe609110-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.180891 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.180901 4857 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.180909 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa5c6527-63ae-4b20-b497-8b7abe609110-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.201339 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.282147 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.321197 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa" path="/var/lib/kubelet/pods/87c314fb-4c6d-4182-bd6f-a0b8bf66ecaa/volumes" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.707582 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"7bee7127-9367-4882-8ab1-0493128d2641","Type":"ContainerStarted","Data":"361acb609316369ca05f319244bbf84ef779ab12c43ab51d140a9f1785789d5e"} Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.707621 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"7bee7127-9367-4882-8ab1-0493128d2641","Type":"ContainerStarted","Data":"b412b43cd790cbb5062aff33c4ac84ffd760e41e2d1023e30df25e13845e09dc"} Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 
13:41:42.709401 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"aa5c6527-63ae-4b20-b497-8b7abe609110","Type":"ContainerDied","Data":"88f91a9d2294254074d637616c9d35e5a0d4dd1b1df56ac9858e363f1e7fb0b5"} Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.709434 4857 scope.go:117] "RemoveContainer" containerID="c065e75bcff4982d3c580553c3075a92f37176a3153b6c5c8ecbfbaecc74b5c5" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.709488 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.746588 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.772343 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.777112 4857 scope.go:117] "RemoveContainer" containerID="2e90c406f3ce5f6e243a58b3a523f5bbbd5c50f0ea05fb2946f569018ff927c0" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.809606 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:41:42 crc kubenswrapper[4857]: E1128 13:41:42.810178 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff6d87b7-1400-461b-ab0c-e122e6e2a5e5" containerName="mariadb-database-create" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.810196 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff6d87b7-1400-461b-ab0c-e122e6e2a5e5" containerName="mariadb-database-create" Nov 28 13:41:42 crc kubenswrapper[4857]: E1128 13:41:42.810210 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa5c6527-63ae-4b20-b497-8b7abe609110" containerName="glance-log" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.810216 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa5c6527-63ae-4b20-b497-8b7abe609110" containerName="glance-log" Nov 28 13:41:42 crc kubenswrapper[4857]: E1128 13:41:42.810227 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b16d37a-3848-4af5-a224-1e50a611c2d7" containerName="mariadb-account-create-update" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.810234 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b16d37a-3848-4af5-a224-1e50a611c2d7" containerName="mariadb-account-create-update" Nov 28 13:41:42 crc kubenswrapper[4857]: E1128 13:41:42.810246 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c62325d-5f8d-4477-9369-5a39a3a0bfc8" containerName="mariadb-account-create-update" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.810252 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c62325d-5f8d-4477-9369-5a39a3a0bfc8" containerName="mariadb-account-create-update" Nov 28 13:41:42 crc kubenswrapper[4857]: E1128 13:41:42.810272 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa5c6527-63ae-4b20-b497-8b7abe609110" containerName="glance-httpd" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.810278 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa5c6527-63ae-4b20-b497-8b7abe609110" containerName="glance-httpd" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.810433 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b16d37a-3848-4af5-a224-1e50a611c2d7" 
containerName="mariadb-account-create-update" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.810450 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa5c6527-63ae-4b20-b497-8b7abe609110" containerName="glance-httpd" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.810458 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c62325d-5f8d-4477-9369-5a39a3a0bfc8" containerName="mariadb-account-create-update" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.810468 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff6d87b7-1400-461b-ab0c-e122e6e2a5e5" containerName="mariadb-database-create" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.810479 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa5c6527-63ae-4b20-b497-8b7abe609110" containerName="glance-log" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.811417 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.813874 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.813978 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.823271 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.992185 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.992493 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.992524 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5876\" (UniqueName: \"kubernetes.io/projected/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-kube-api-access-p5876\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.992558 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.992575 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-logs\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " 
pod="openstack/glance-default-internal-api-0" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.992613 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.992663 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:41:42 crc kubenswrapper[4857]: I1128 13:41:42.992682 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.094404 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.094453 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.094510 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.094537 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.094564 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5876\" (UniqueName: \"kubernetes.io/projected/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-kube-api-access-p5876\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.094594 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " 
pod="openstack/glance-default-internal-api-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.094613 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-logs\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.094649 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.095665 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.096165 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-internal-api-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.098144 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-logs\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.101828 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.102025 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.102366 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.103592 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.128786 4857 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-p5876\" (UniqueName: \"kubernetes.io/projected/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-kube-api-access-p5876\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.142262 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.170793 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.273489 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.400406 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-log-httpd\") pod \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.400554 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-combined-ca-bundle\") pod \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.400595 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wjrwq\" (UniqueName: \"kubernetes.io/projected/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-kube-api-access-wjrwq\") pod \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.400634 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-scripts\") pod \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.400670 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-run-httpd\") pod \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.400688 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-sg-core-conf-yaml\") pod \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.400705 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-config-data\") pod \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\" (UID: \"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3\") " Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.402092 4857 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" (UID: "a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.405917 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" (UID: "a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.411442 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-kube-api-access-wjrwq" (OuterVolumeSpecName: "kube-api-access-wjrwq") pod "a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" (UID: "a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3"). InnerVolumeSpecName "kube-api-access-wjrwq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.417932 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-scripts" (OuterVolumeSpecName: "scripts") pod "a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" (UID: "a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.449349 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" (UID: "a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.505934 4857 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.505965 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wjrwq\" (UniqueName: \"kubernetes.io/projected/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-kube-api-access-wjrwq\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.505975 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.505982 4857 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.505991 4857 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.527178 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" (UID: "a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.533023 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-config-data" (OuterVolumeSpecName: "config-data") pod "a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" (UID: "a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.607084 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.607111 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.721350 4857 generic.go:334] "Generic (PLEG): container finished" podID="a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" containerID="9d585ad6ed5c1ed2d4084a6c16dcddd38749967ad5d1ed0f89f762b91336348a" exitCode=0 Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.721405 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3","Type":"ContainerDied","Data":"9d585ad6ed5c1ed2d4084a6c16dcddd38749967ad5d1ed0f89f762b91336348a"} Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.721728 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3","Type":"ContainerDied","Data":"17e9ff36675d97ead2c2a4a69b1937a195122e05caf462a349aacfcee40ce0e2"} Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.721779 4857 scope.go:117] "RemoveContainer" containerID="9989c73319857c1dfbd53314ea5c9c604b563ab7ff0b98abc71effd88840aea9" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.721436 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.723776 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"7bee7127-9367-4882-8ab1-0493128d2641","Type":"ContainerStarted","Data":"15caeb74f903a78a3ff675fa24fc2fa63c9da6eab92af97c459eb92425c7c093"} Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.745044 4857 scope.go:117] "RemoveContainer" containerID="d79d9b2079c8d418c02ab7fa7ad5efa4707c05b7a47d13e0ad94309dd61e672a" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.769159 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.7691308709999998 podStartE2EDuration="3.769130871s" podCreationTimestamp="2025-11-28 13:41:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:41:43.755371536 +0000 UTC m=+1395.782746743" watchObservedRunningTime="2025-11-28 13:41:43.769130871 +0000 UTC m=+1395.796506068" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.791159 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.793583 4857 scope.go:117] "RemoveContainer" containerID="31fb9037fd19e39c1ea46708082033162678704f4a0f4ad2fd303ff13e532e29" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.801185 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.812368 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 
13:41:43.819305 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:41:43 crc kubenswrapper[4857]: E1128 13:41:43.819652 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" containerName="ceilometer-notification-agent" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.819667 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" containerName="ceilometer-notification-agent" Nov 28 13:41:43 crc kubenswrapper[4857]: E1128 13:41:43.819681 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" containerName="proxy-httpd" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.819687 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" containerName="proxy-httpd" Nov 28 13:41:43 crc kubenswrapper[4857]: E1128 13:41:43.819706 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" containerName="ceilometer-central-agent" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.819713 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" containerName="ceilometer-central-agent" Nov 28 13:41:43 crc kubenswrapper[4857]: E1128 13:41:43.819721 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" containerName="sg-core" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.819727 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" containerName="sg-core" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.820112 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" containerName="ceilometer-central-agent" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.820129 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" containerName="proxy-httpd" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.820138 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" containerName="ceilometer-notification-agent" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.820153 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" containerName="sg-core" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.821601 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.825315 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.825994 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 13:41:43 crc kubenswrapper[4857]: W1128 13:41:43.828084 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3a952329_a8d9_432d_ac5b_d88b7e2ede6b.slice/crio-33e7f3856876296097f9a72ec4da890288bb9eeafee3de17fdeeeafb9e5f1c6e WatchSource:0}: Error finding container 33e7f3856876296097f9a72ec4da890288bb9eeafee3de17fdeeeafb9e5f1c6e: Status 404 returned error can't find the container with id 33e7f3856876296097f9a72ec4da890288bb9eeafee3de17fdeeeafb9e5f1c6e Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.836092 4857 scope.go:117] "RemoveContainer" containerID="9d585ad6ed5c1ed2d4084a6c16dcddd38749967ad5d1ed0f89f762b91336348a" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.848718 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.876768 4857 scope.go:117] "RemoveContainer" containerID="9989c73319857c1dfbd53314ea5c9c604b563ab7ff0b98abc71effd88840aea9" Nov 28 13:41:43 crc kubenswrapper[4857]: E1128 13:41:43.877826 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9989c73319857c1dfbd53314ea5c9c604b563ab7ff0b98abc71effd88840aea9\": container with ID starting with 9989c73319857c1dfbd53314ea5c9c604b563ab7ff0b98abc71effd88840aea9 not found: ID does not exist" containerID="9989c73319857c1dfbd53314ea5c9c604b563ab7ff0b98abc71effd88840aea9" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.877888 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9989c73319857c1dfbd53314ea5c9c604b563ab7ff0b98abc71effd88840aea9"} err="failed to get container status \"9989c73319857c1dfbd53314ea5c9c604b563ab7ff0b98abc71effd88840aea9\": rpc error: code = NotFound desc = could not find container \"9989c73319857c1dfbd53314ea5c9c604b563ab7ff0b98abc71effd88840aea9\": container with ID starting with 9989c73319857c1dfbd53314ea5c9c604b563ab7ff0b98abc71effd88840aea9 not found: ID does not exist" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.877976 4857 scope.go:117] "RemoveContainer" containerID="d79d9b2079c8d418c02ab7fa7ad5efa4707c05b7a47d13e0ad94309dd61e672a" Nov 28 13:41:43 crc kubenswrapper[4857]: E1128 13:41:43.879184 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d79d9b2079c8d418c02ab7fa7ad5efa4707c05b7a47d13e0ad94309dd61e672a\": container with ID starting with d79d9b2079c8d418c02ab7fa7ad5efa4707c05b7a47d13e0ad94309dd61e672a not found: ID does not exist" containerID="d79d9b2079c8d418c02ab7fa7ad5efa4707c05b7a47d13e0ad94309dd61e672a" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.879223 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d79d9b2079c8d418c02ab7fa7ad5efa4707c05b7a47d13e0ad94309dd61e672a"} err="failed to get container status \"d79d9b2079c8d418c02ab7fa7ad5efa4707c05b7a47d13e0ad94309dd61e672a\": rpc error: code = NotFound desc = could not find container 
\"d79d9b2079c8d418c02ab7fa7ad5efa4707c05b7a47d13e0ad94309dd61e672a\": container with ID starting with d79d9b2079c8d418c02ab7fa7ad5efa4707c05b7a47d13e0ad94309dd61e672a not found: ID does not exist" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.879242 4857 scope.go:117] "RemoveContainer" containerID="31fb9037fd19e39c1ea46708082033162678704f4a0f4ad2fd303ff13e532e29" Nov 28 13:41:43 crc kubenswrapper[4857]: E1128 13:41:43.880903 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31fb9037fd19e39c1ea46708082033162678704f4a0f4ad2fd303ff13e532e29\": container with ID starting with 31fb9037fd19e39c1ea46708082033162678704f4a0f4ad2fd303ff13e532e29 not found: ID does not exist" containerID="31fb9037fd19e39c1ea46708082033162678704f4a0f4ad2fd303ff13e532e29" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.880964 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31fb9037fd19e39c1ea46708082033162678704f4a0f4ad2fd303ff13e532e29"} err="failed to get container status \"31fb9037fd19e39c1ea46708082033162678704f4a0f4ad2fd303ff13e532e29\": rpc error: code = NotFound desc = could not find container \"31fb9037fd19e39c1ea46708082033162678704f4a0f4ad2fd303ff13e532e29\": container with ID starting with 31fb9037fd19e39c1ea46708082033162678704f4a0f4ad2fd303ff13e532e29 not found: ID does not exist" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.880993 4857 scope.go:117] "RemoveContainer" containerID="9d585ad6ed5c1ed2d4084a6c16dcddd38749967ad5d1ed0f89f762b91336348a" Nov 28 13:41:43 crc kubenswrapper[4857]: E1128 13:41:43.881328 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d585ad6ed5c1ed2d4084a6c16dcddd38749967ad5d1ed0f89f762b91336348a\": container with ID starting with 9d585ad6ed5c1ed2d4084a6c16dcddd38749967ad5d1ed0f89f762b91336348a not found: ID does not exist" containerID="9d585ad6ed5c1ed2d4084a6c16dcddd38749967ad5d1ed0f89f762b91336348a" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.881376 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d585ad6ed5c1ed2d4084a6c16dcddd38749967ad5d1ed0f89f762b91336348a"} err="failed to get container status \"9d585ad6ed5c1ed2d4084a6c16dcddd38749967ad5d1ed0f89f762b91336348a\": rpc error: code = NotFound desc = could not find container \"9d585ad6ed5c1ed2d4084a6c16dcddd38749967ad5d1ed0f89f762b91336348a\": container with ID starting with 9d585ad6ed5c1ed2d4084a6c16dcddd38749967ad5d1ed0f89f762b91336348a not found: ID does not exist" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.911997 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c39da9c-3096-4b52-98d9-a0fa0044aaab-log-httpd\") pod \"ceilometer-0\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " pod="openstack/ceilometer-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.912046 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5nf5\" (UniqueName: \"kubernetes.io/projected/4c39da9c-3096-4b52-98d9-a0fa0044aaab-kube-api-access-p5nf5\") pod \"ceilometer-0\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " pod="openstack/ceilometer-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.912092 4857 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c39da9c-3096-4b52-98d9-a0fa0044aaab-run-httpd\") pod \"ceilometer-0\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " pod="openstack/ceilometer-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.912108 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " pod="openstack/ceilometer-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.912178 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " pod="openstack/ceilometer-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.912262 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-config-data\") pod \"ceilometer-0\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " pod="openstack/ceilometer-0" Nov 28 13:41:43 crc kubenswrapper[4857]: I1128 13:41:43.912289 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-scripts\") pod \"ceilometer-0\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " pod="openstack/ceilometer-0" Nov 28 13:41:44 crc kubenswrapper[4857]: I1128 13:41:44.013309 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " pod="openstack/ceilometer-0" Nov 28 13:41:44 crc kubenswrapper[4857]: I1128 13:41:44.013682 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-config-data\") pod \"ceilometer-0\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " pod="openstack/ceilometer-0" Nov 28 13:41:44 crc kubenswrapper[4857]: I1128 13:41:44.013715 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-scripts\") pod \"ceilometer-0\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " pod="openstack/ceilometer-0" Nov 28 13:41:44 crc kubenswrapper[4857]: I1128 13:41:44.013736 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c39da9c-3096-4b52-98d9-a0fa0044aaab-log-httpd\") pod \"ceilometer-0\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " pod="openstack/ceilometer-0" Nov 28 13:41:44 crc kubenswrapper[4857]: I1128 13:41:44.013771 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5nf5\" (UniqueName: \"kubernetes.io/projected/4c39da9c-3096-4b52-98d9-a0fa0044aaab-kube-api-access-p5nf5\") pod \"ceilometer-0\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " pod="openstack/ceilometer-0" Nov 28 13:41:44 crc 
kubenswrapper[4857]: I1128 13:41:44.013813 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c39da9c-3096-4b52-98d9-a0fa0044aaab-run-httpd\") pod \"ceilometer-0\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " pod="openstack/ceilometer-0" Nov 28 13:41:44 crc kubenswrapper[4857]: I1128 13:41:44.013832 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " pod="openstack/ceilometer-0" Nov 28 13:41:44 crc kubenswrapper[4857]: I1128 13:41:44.014524 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c39da9c-3096-4b52-98d9-a0fa0044aaab-log-httpd\") pod \"ceilometer-0\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " pod="openstack/ceilometer-0" Nov 28 13:41:44 crc kubenswrapper[4857]: I1128 13:41:44.015489 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c39da9c-3096-4b52-98d9-a0fa0044aaab-run-httpd\") pod \"ceilometer-0\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " pod="openstack/ceilometer-0" Nov 28 13:41:44 crc kubenswrapper[4857]: I1128 13:41:44.019809 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " pod="openstack/ceilometer-0" Nov 28 13:41:44 crc kubenswrapper[4857]: I1128 13:41:44.021666 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-config-data\") pod \"ceilometer-0\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " pod="openstack/ceilometer-0" Nov 28 13:41:44 crc kubenswrapper[4857]: I1128 13:41:44.023432 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-scripts\") pod \"ceilometer-0\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " pod="openstack/ceilometer-0" Nov 28 13:41:44 crc kubenswrapper[4857]: I1128 13:41:44.025531 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " pod="openstack/ceilometer-0" Nov 28 13:41:44 crc kubenswrapper[4857]: I1128 13:41:44.036502 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5nf5\" (UniqueName: \"kubernetes.io/projected/4c39da9c-3096-4b52-98d9-a0fa0044aaab-kube-api-access-p5nf5\") pod \"ceilometer-0\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " pod="openstack/ceilometer-0" Nov 28 13:41:44 crc kubenswrapper[4857]: I1128 13:41:44.166451 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:41:44 crc kubenswrapper[4857]: I1128 13:41:44.324507 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3" path="/var/lib/kubelet/pods/a7fc1f59-9d53-4da9-9eed-1c0d4ba10bf3/volumes" Nov 28 13:41:44 crc kubenswrapper[4857]: I1128 13:41:44.327363 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa5c6527-63ae-4b20-b497-8b7abe609110" path="/var/lib/kubelet/pods/aa5c6527-63ae-4b20-b497-8b7abe609110/volumes" Nov 28 13:41:44 crc kubenswrapper[4857]: I1128 13:41:44.618789 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:41:44 crc kubenswrapper[4857]: W1128 13:41:44.621687 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c39da9c_3096_4b52_98d9_a0fa0044aaab.slice/crio-0e7a05540dbb4c6d7b2087736aa897ccf30f7abe3265824c0b9bf5d4a614997c WatchSource:0}: Error finding container 0e7a05540dbb4c6d7b2087736aa897ccf30f7abe3265824c0b9bf5d4a614997c: Status 404 returned error can't find the container with id 0e7a05540dbb4c6d7b2087736aa897ccf30f7abe3265824c0b9bf5d4a614997c Nov 28 13:41:44 crc kubenswrapper[4857]: I1128 13:41:44.757639 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c39da9c-3096-4b52-98d9-a0fa0044aaab","Type":"ContainerStarted","Data":"0e7a05540dbb4c6d7b2087736aa897ccf30f7abe3265824c0b9bf5d4a614997c"} Nov 28 13:41:44 crc kubenswrapper[4857]: I1128 13:41:44.760453 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3a952329-a8d9-432d-ac5b-d88b7e2ede6b","Type":"ContainerStarted","Data":"990eadd5834f267197096c5bc4a36f6e0524a5b8386ca4956f7f56c8c34c8ce5"} Nov 28 13:41:44 crc kubenswrapper[4857]: I1128 13:41:44.760491 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3a952329-a8d9-432d-ac5b-d88b7e2ede6b","Type":"ContainerStarted","Data":"33e7f3856876296097f9a72ec4da890288bb9eeafee3de17fdeeeafb9e5f1c6e"} Nov 28 13:41:45 crc kubenswrapper[4857]: I1128 13:41:45.089395 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:41:45 crc kubenswrapper[4857]: I1128 13:41:45.775657 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c39da9c-3096-4b52-98d9-a0fa0044aaab","Type":"ContainerStarted","Data":"f19829722ac79392d8627616164f55d72db13faf46078a7d043faaae0f59d961"} Nov 28 13:41:45 crc kubenswrapper[4857]: I1128 13:41:45.790184 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3a952329-a8d9-432d-ac5b-d88b7e2ede6b","Type":"ContainerStarted","Data":"d8e862b58223c1ae15f7828a07974724e3a49c1477b31569a8dbea821c8bc09e"} Nov 28 13:41:45 crc kubenswrapper[4857]: I1128 13:41:45.821081 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.821062768 podStartE2EDuration="3.821062768s" podCreationTimestamp="2025-11-28 13:41:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:41:45.807268471 +0000 UTC m=+1397.834643638" watchObservedRunningTime="2025-11-28 13:41:45.821062768 +0000 UTC m=+1397.848437935" Nov 28 13:41:46 crc kubenswrapper[4857]: I1128 
13:41:46.800429 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c39da9c-3096-4b52-98d9-a0fa0044aaab","Type":"ContainerStarted","Data":"8edf3fe5d10ee00fb8eee881be08aa852f81529ca6a7be17102ad0497899a8b9"} Nov 28 13:41:47 crc kubenswrapper[4857]: I1128 13:41:47.274553 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fb46r"] Nov 28 13:41:47 crc kubenswrapper[4857]: I1128 13:41:47.276220 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-fb46r" Nov 28 13:41:47 crc kubenswrapper[4857]: I1128 13:41:47.280897 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 28 13:41:47 crc kubenswrapper[4857]: I1128 13:41:47.281226 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 28 13:41:47 crc kubenswrapper[4857]: I1128 13:41:47.293937 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fb46r"] Nov 28 13:41:47 crc kubenswrapper[4857]: I1128 13:41:47.294272 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-pv849" Nov 28 13:41:47 crc kubenswrapper[4857]: I1128 13:41:47.381597 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f43bd8e8-a5d3-4575-894a-8df5746b831d-scripts\") pod \"nova-cell0-conductor-db-sync-fb46r\" (UID: \"f43bd8e8-a5d3-4575-894a-8df5746b831d\") " pod="openstack/nova-cell0-conductor-db-sync-fb46r" Nov 28 13:41:47 crc kubenswrapper[4857]: I1128 13:41:47.381889 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f43bd8e8-a5d3-4575-894a-8df5746b831d-config-data\") pod \"nova-cell0-conductor-db-sync-fb46r\" (UID: \"f43bd8e8-a5d3-4575-894a-8df5746b831d\") " pod="openstack/nova-cell0-conductor-db-sync-fb46r" Nov 28 13:41:47 crc kubenswrapper[4857]: I1128 13:41:47.382015 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f43bd8e8-a5d3-4575-894a-8df5746b831d-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-fb46r\" (UID: \"f43bd8e8-a5d3-4575-894a-8df5746b831d\") " pod="openstack/nova-cell0-conductor-db-sync-fb46r" Nov 28 13:41:47 crc kubenswrapper[4857]: I1128 13:41:47.382535 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcb5r\" (UniqueName: \"kubernetes.io/projected/f43bd8e8-a5d3-4575-894a-8df5746b831d-kube-api-access-zcb5r\") pod \"nova-cell0-conductor-db-sync-fb46r\" (UID: \"f43bd8e8-a5d3-4575-894a-8df5746b831d\") " pod="openstack/nova-cell0-conductor-db-sync-fb46r" Nov 28 13:41:47 crc kubenswrapper[4857]: I1128 13:41:47.493953 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f43bd8e8-a5d3-4575-894a-8df5746b831d-scripts\") pod \"nova-cell0-conductor-db-sync-fb46r\" (UID: \"f43bd8e8-a5d3-4575-894a-8df5746b831d\") " pod="openstack/nova-cell0-conductor-db-sync-fb46r" Nov 28 13:41:47 crc kubenswrapper[4857]: I1128 13:41:47.493998 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/f43bd8e8-a5d3-4575-894a-8df5746b831d-config-data\") pod \"nova-cell0-conductor-db-sync-fb46r\" (UID: \"f43bd8e8-a5d3-4575-894a-8df5746b831d\") " pod="openstack/nova-cell0-conductor-db-sync-fb46r" Nov 28 13:41:47 crc kubenswrapper[4857]: I1128 13:41:47.494032 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f43bd8e8-a5d3-4575-894a-8df5746b831d-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-fb46r\" (UID: \"f43bd8e8-a5d3-4575-894a-8df5746b831d\") " pod="openstack/nova-cell0-conductor-db-sync-fb46r" Nov 28 13:41:47 crc kubenswrapper[4857]: I1128 13:41:47.494063 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zcb5r\" (UniqueName: \"kubernetes.io/projected/f43bd8e8-a5d3-4575-894a-8df5746b831d-kube-api-access-zcb5r\") pod \"nova-cell0-conductor-db-sync-fb46r\" (UID: \"f43bd8e8-a5d3-4575-894a-8df5746b831d\") " pod="openstack/nova-cell0-conductor-db-sync-fb46r" Nov 28 13:41:47 crc kubenswrapper[4857]: I1128 13:41:47.499124 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f43bd8e8-a5d3-4575-894a-8df5746b831d-config-data\") pod \"nova-cell0-conductor-db-sync-fb46r\" (UID: \"f43bd8e8-a5d3-4575-894a-8df5746b831d\") " pod="openstack/nova-cell0-conductor-db-sync-fb46r" Nov 28 13:41:47 crc kubenswrapper[4857]: I1128 13:41:47.500416 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f43bd8e8-a5d3-4575-894a-8df5746b831d-scripts\") pod \"nova-cell0-conductor-db-sync-fb46r\" (UID: \"f43bd8e8-a5d3-4575-894a-8df5746b831d\") " pod="openstack/nova-cell0-conductor-db-sync-fb46r" Nov 28 13:41:47 crc kubenswrapper[4857]: I1128 13:41:47.510322 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f43bd8e8-a5d3-4575-894a-8df5746b831d-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-fb46r\" (UID: \"f43bd8e8-a5d3-4575-894a-8df5746b831d\") " pod="openstack/nova-cell0-conductor-db-sync-fb46r" Nov 28 13:41:47 crc kubenswrapper[4857]: I1128 13:41:47.512896 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zcb5r\" (UniqueName: \"kubernetes.io/projected/f43bd8e8-a5d3-4575-894a-8df5746b831d-kube-api-access-zcb5r\") pod \"nova-cell0-conductor-db-sync-fb46r\" (UID: \"f43bd8e8-a5d3-4575-894a-8df5746b831d\") " pod="openstack/nova-cell0-conductor-db-sync-fb46r" Nov 28 13:41:47 crc kubenswrapper[4857]: I1128 13:41:47.594597 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-fb46r" Nov 28 13:41:47 crc kubenswrapper[4857]: I1128 13:41:47.812350 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c39da9c-3096-4b52-98d9-a0fa0044aaab","Type":"ContainerStarted","Data":"810cb0cc9b151e06157716ba5770c7f298b4e0c8c9e8c40ebe0eab779d607d81"} Nov 28 13:41:48 crc kubenswrapper[4857]: I1128 13:41:48.061816 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fb46r"] Nov 28 13:41:48 crc kubenswrapper[4857]: W1128 13:41:48.065143 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf43bd8e8_a5d3_4575_894a_8df5746b831d.slice/crio-e40b8fecab6070f09900fff3ddeac6fd38d114509c94d460e7619db2e162670d WatchSource:0}: Error finding container e40b8fecab6070f09900fff3ddeac6fd38d114509c94d460e7619db2e162670d: Status 404 returned error can't find the container with id e40b8fecab6070f09900fff3ddeac6fd38d114509c94d460e7619db2e162670d Nov 28 13:41:48 crc kubenswrapper[4857]: I1128 13:41:48.822783 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c39da9c-3096-4b52-98d9-a0fa0044aaab","Type":"ContainerStarted","Data":"09be6f799fbff1dca9f52dabb0e34c2ec98bc4fdc9d8aa51e11d0c227b85b871"} Nov 28 13:41:48 crc kubenswrapper[4857]: I1128 13:41:48.823132 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 13:41:48 crc kubenswrapper[4857]: I1128 13:41:48.823128 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4c39da9c-3096-4b52-98d9-a0fa0044aaab" containerName="ceilometer-central-agent" containerID="cri-o://f19829722ac79392d8627616164f55d72db13faf46078a7d043faaae0f59d961" gracePeriod=30 Nov 28 13:41:48 crc kubenswrapper[4857]: I1128 13:41:48.823255 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4c39da9c-3096-4b52-98d9-a0fa0044aaab" containerName="proxy-httpd" containerID="cri-o://09be6f799fbff1dca9f52dabb0e34c2ec98bc4fdc9d8aa51e11d0c227b85b871" gracePeriod=30 Nov 28 13:41:48 crc kubenswrapper[4857]: I1128 13:41:48.823288 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4c39da9c-3096-4b52-98d9-a0fa0044aaab" containerName="sg-core" containerID="cri-o://810cb0cc9b151e06157716ba5770c7f298b4e0c8c9e8c40ebe0eab779d607d81" gracePeriod=30 Nov 28 13:41:48 crc kubenswrapper[4857]: I1128 13:41:48.823315 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4c39da9c-3096-4b52-98d9-a0fa0044aaab" containerName="ceilometer-notification-agent" containerID="cri-o://8edf3fe5d10ee00fb8eee881be08aa852f81529ca6a7be17102ad0497899a8b9" gracePeriod=30 Nov 28 13:41:48 crc kubenswrapper[4857]: I1128 13:41:48.825378 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-fb46r" event={"ID":"f43bd8e8-a5d3-4575-894a-8df5746b831d","Type":"ContainerStarted","Data":"e40b8fecab6070f09900fff3ddeac6fd38d114509c94d460e7619db2e162670d"} Nov 28 13:41:48 crc kubenswrapper[4857]: I1128 13:41:48.862509 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.044601682 podStartE2EDuration="5.862488219s" podCreationTimestamp="2025-11-28 13:41:43 +0000 UTC" 
firstStartedPulling="2025-11-28 13:41:44.623624022 +0000 UTC m=+1396.650999189" lastFinishedPulling="2025-11-28 13:41:48.441510559 +0000 UTC m=+1400.468885726" observedRunningTime="2025-11-28 13:41:48.852163452 +0000 UTC m=+1400.879538639" watchObservedRunningTime="2025-11-28 13:41:48.862488219 +0000 UTC m=+1400.889863386" Nov 28 13:41:49 crc kubenswrapper[4857]: I1128 13:41:49.865859 4857 generic.go:334] "Generic (PLEG): container finished" podID="4c39da9c-3096-4b52-98d9-a0fa0044aaab" containerID="09be6f799fbff1dca9f52dabb0e34c2ec98bc4fdc9d8aa51e11d0c227b85b871" exitCode=0 Nov 28 13:41:49 crc kubenswrapper[4857]: I1128 13:41:49.865890 4857 generic.go:334] "Generic (PLEG): container finished" podID="4c39da9c-3096-4b52-98d9-a0fa0044aaab" containerID="810cb0cc9b151e06157716ba5770c7f298b4e0c8c9e8c40ebe0eab779d607d81" exitCode=2 Nov 28 13:41:49 crc kubenswrapper[4857]: I1128 13:41:49.865898 4857 generic.go:334] "Generic (PLEG): container finished" podID="4c39da9c-3096-4b52-98d9-a0fa0044aaab" containerID="8edf3fe5d10ee00fb8eee881be08aa852f81529ca6a7be17102ad0497899a8b9" exitCode=0 Nov 28 13:41:49 crc kubenswrapper[4857]: I1128 13:41:49.865916 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c39da9c-3096-4b52-98d9-a0fa0044aaab","Type":"ContainerDied","Data":"09be6f799fbff1dca9f52dabb0e34c2ec98bc4fdc9d8aa51e11d0c227b85b871"} Nov 28 13:41:49 crc kubenswrapper[4857]: I1128 13:41:49.865939 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c39da9c-3096-4b52-98d9-a0fa0044aaab","Type":"ContainerDied","Data":"810cb0cc9b151e06157716ba5770c7f298b4e0c8c9e8c40ebe0eab779d607d81"} Nov 28 13:41:49 crc kubenswrapper[4857]: I1128 13:41:49.865948 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c39da9c-3096-4b52-98d9-a0fa0044aaab","Type":"ContainerDied","Data":"8edf3fe5d10ee00fb8eee881be08aa852f81529ca6a7be17102ad0497899a8b9"} Nov 28 13:41:51 crc kubenswrapper[4857]: I1128 13:41:51.151401 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 13:41:51 crc kubenswrapper[4857]: I1128 13:41:51.152027 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 13:41:51 crc kubenswrapper[4857]: I1128 13:41:51.183509 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 13:41:51 crc kubenswrapper[4857]: I1128 13:41:51.213412 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 13:41:51 crc kubenswrapper[4857]: I1128 13:41:51.889532 4857 generic.go:334] "Generic (PLEG): container finished" podID="4c39da9c-3096-4b52-98d9-a0fa0044aaab" containerID="f19829722ac79392d8627616164f55d72db13faf46078a7d043faaae0f59d961" exitCode=0 Nov 28 13:41:51 crc kubenswrapper[4857]: I1128 13:41:51.889611 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c39da9c-3096-4b52-98d9-a0fa0044aaab","Type":"ContainerDied","Data":"f19829722ac79392d8627616164f55d72db13faf46078a7d043faaae0f59d961"} Nov 28 13:41:51 crc kubenswrapper[4857]: I1128 13:41:51.890174 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 13:41:51 crc kubenswrapper[4857]: I1128 13:41:51.890200 4857 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 13:41:53 crc kubenswrapper[4857]: I1128 13:41:53.172264 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 13:41:53 crc kubenswrapper[4857]: I1128 13:41:53.172326 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 13:41:53 crc kubenswrapper[4857]: I1128 13:41:53.220675 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 13:41:53 crc kubenswrapper[4857]: I1128 13:41:53.232483 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 13:41:53 crc kubenswrapper[4857]: I1128 13:41:53.831410 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 13:41:53 crc kubenswrapper[4857]: I1128 13:41:53.836105 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 13:41:53 crc kubenswrapper[4857]: I1128 13:41:53.919546 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 13:41:53 crc kubenswrapper[4857]: I1128 13:41:53.919578 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 13:41:55 crc kubenswrapper[4857]: I1128 13:41:55.806027 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 13:41:55 crc kubenswrapper[4857]: I1128 13:41:55.807022 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.040900 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.097344 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c39da9c-3096-4b52-98d9-a0fa0044aaab-log-httpd\") pod \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.097397 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p5nf5\" (UniqueName: \"kubernetes.io/projected/4c39da9c-3096-4b52-98d9-a0fa0044aaab-kube-api-access-p5nf5\") pod \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.097444 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-sg-core-conf-yaml\") pod \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.097469 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-scripts\") pod \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.097493 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-config-data\") pod \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.097556 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-combined-ca-bundle\") pod \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.097687 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c39da9c-3096-4b52-98d9-a0fa0044aaab-run-httpd\") pod \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\" (UID: \"4c39da9c-3096-4b52-98d9-a0fa0044aaab\") " Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.098041 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c39da9c-3096-4b52-98d9-a0fa0044aaab-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "4c39da9c-3096-4b52-98d9-a0fa0044aaab" (UID: "4c39da9c-3096-4b52-98d9-a0fa0044aaab"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.098549 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c39da9c-3096-4b52-98d9-a0fa0044aaab-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "4c39da9c-3096-4b52-98d9-a0fa0044aaab" (UID: "4c39da9c-3096-4b52-98d9-a0fa0044aaab"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.098579 4857 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c39da9c-3096-4b52-98d9-a0fa0044aaab-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.101995 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-scripts" (OuterVolumeSpecName: "scripts") pod "4c39da9c-3096-4b52-98d9-a0fa0044aaab" (UID: "4c39da9c-3096-4b52-98d9-a0fa0044aaab"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.103172 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c39da9c-3096-4b52-98d9-a0fa0044aaab-kube-api-access-p5nf5" (OuterVolumeSpecName: "kube-api-access-p5nf5") pod "4c39da9c-3096-4b52-98d9-a0fa0044aaab" (UID: "4c39da9c-3096-4b52-98d9-a0fa0044aaab"). InnerVolumeSpecName "kube-api-access-p5nf5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.121087 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "4c39da9c-3096-4b52-98d9-a0fa0044aaab" (UID: "4c39da9c-3096-4b52-98d9-a0fa0044aaab"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.200329 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4c39da9c-3096-4b52-98d9-a0fa0044aaab" (UID: "4c39da9c-3096-4b52-98d9-a0fa0044aaab"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.200372 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p5nf5\" (UniqueName: \"kubernetes.io/projected/4c39da9c-3096-4b52-98d9-a0fa0044aaab-kube-api-access-p5nf5\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.200400 4857 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.200413 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.200424 4857 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c39da9c-3096-4b52-98d9-a0fa0044aaab-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.217622 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-config-data" (OuterVolumeSpecName: "config-data") pod "4c39da9c-3096-4b52-98d9-a0fa0044aaab" (UID: "4c39da9c-3096-4b52-98d9-a0fa0044aaab"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.302579 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.302939 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c39da9c-3096-4b52-98d9-a0fa0044aaab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.969306 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.969541 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c39da9c-3096-4b52-98d9-a0fa0044aaab","Type":"ContainerDied","Data":"0e7a05540dbb4c6d7b2087736aa897ccf30f7abe3265824c0b9bf5d4a614997c"} Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.969829 4857 scope.go:117] "RemoveContainer" containerID="09be6f799fbff1dca9f52dabb0e34c2ec98bc4fdc9d8aa51e11d0c227b85b871" Nov 28 13:41:58 crc kubenswrapper[4857]: I1128 13:41:58.973964 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-fb46r" event={"ID":"f43bd8e8-a5d3-4575-894a-8df5746b831d","Type":"ContainerStarted","Data":"c8b37b179bae98a24592b60f7c52271b9cd46845ca9bc9a901e30860b3b46753"} Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.011053 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-fb46r" podStartSLOduration=2.049162039 podStartE2EDuration="12.011019964s" podCreationTimestamp="2025-11-28 13:41:47 +0000 UTC" firstStartedPulling="2025-11-28 13:41:48.067022216 +0000 UTC m=+1400.094397383" lastFinishedPulling="2025-11-28 13:41:58.028880141 +0000 UTC m=+1410.056255308" observedRunningTime="2025-11-28 13:41:58.99834823 +0000 UTC m=+1411.025723447" watchObservedRunningTime="2025-11-28 13:41:59.011019964 +0000 UTC m=+1411.038395201" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.028575 4857 scope.go:117] "RemoveContainer" containerID="810cb0cc9b151e06157716ba5770c7f298b4e0c8c9e8c40ebe0eab779d607d81" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.052849 4857 scope.go:117] "RemoveContainer" containerID="8edf3fe5d10ee00fb8eee881be08aa852f81529ca6a7be17102ad0497899a8b9" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.065220 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.071165 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.081739 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:41:59 crc kubenswrapper[4857]: E1128 13:41:59.082304 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c39da9c-3096-4b52-98d9-a0fa0044aaab" containerName="proxy-httpd" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.082335 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c39da9c-3096-4b52-98d9-a0fa0044aaab" containerName="proxy-httpd" Nov 28 13:41:59 crc kubenswrapper[4857]: E1128 13:41:59.082359 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c39da9c-3096-4b52-98d9-a0fa0044aaab" 
containerName="ceilometer-notification-agent" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.082370 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c39da9c-3096-4b52-98d9-a0fa0044aaab" containerName="ceilometer-notification-agent" Nov 28 13:41:59 crc kubenswrapper[4857]: E1128 13:41:59.082422 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c39da9c-3096-4b52-98d9-a0fa0044aaab" containerName="ceilometer-central-agent" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.082434 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c39da9c-3096-4b52-98d9-a0fa0044aaab" containerName="ceilometer-central-agent" Nov 28 13:41:59 crc kubenswrapper[4857]: E1128 13:41:59.082463 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c39da9c-3096-4b52-98d9-a0fa0044aaab" containerName="sg-core" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.082474 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c39da9c-3096-4b52-98d9-a0fa0044aaab" containerName="sg-core" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.083561 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c39da9c-3096-4b52-98d9-a0fa0044aaab" containerName="ceilometer-central-agent" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.083646 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c39da9c-3096-4b52-98d9-a0fa0044aaab" containerName="sg-core" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.083669 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c39da9c-3096-4b52-98d9-a0fa0044aaab" containerName="proxy-httpd" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.083685 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c39da9c-3096-4b52-98d9-a0fa0044aaab" containerName="ceilometer-notification-agent" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.086525 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.092345 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.123108 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.123320 4857 scope.go:117] "RemoveContainer" containerID="f19829722ac79392d8627616164f55d72db13faf46078a7d043faaae0f59d961" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.123326 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.225292 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-772x6\" (UniqueName: \"kubernetes.io/projected/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-kube-api-access-772x6\") pod \"ceilometer-0\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") " pod="openstack/ceilometer-0" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.225357 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-run-httpd\") pod \"ceilometer-0\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") " pod="openstack/ceilometer-0" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.225383 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") " pod="openstack/ceilometer-0" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.225402 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-config-data\") pod \"ceilometer-0\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") " pod="openstack/ceilometer-0" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.225516 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-scripts\") pod \"ceilometer-0\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") " pod="openstack/ceilometer-0" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.225780 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-log-httpd\") pod \"ceilometer-0\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") " pod="openstack/ceilometer-0" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.225897 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") " pod="openstack/ceilometer-0" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.328233 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-log-httpd\") pod \"ceilometer-0\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") " pod="openstack/ceilometer-0" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.328311 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") " pod="openstack/ceilometer-0" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.328384 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-772x6\" (UniqueName: \"kubernetes.io/projected/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-kube-api-access-772x6\") pod \"ceilometer-0\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") " pod="openstack/ceilometer-0" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.328432 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-run-httpd\") pod \"ceilometer-0\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") " pod="openstack/ceilometer-0" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.328458 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-config-data\") pod \"ceilometer-0\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") " pod="openstack/ceilometer-0" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.328477 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") " pod="openstack/ceilometer-0" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.328516 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-scripts\") pod \"ceilometer-0\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") " pod="openstack/ceilometer-0" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.328776 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-log-httpd\") pod \"ceilometer-0\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") " pod="openstack/ceilometer-0" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.328798 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-run-httpd\") pod \"ceilometer-0\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") " pod="openstack/ceilometer-0" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.332984 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") " pod="openstack/ceilometer-0" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.334607 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-config-data\") pod \"ceilometer-0\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") " pod="openstack/ceilometer-0" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.344418 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-scripts\") pod \"ceilometer-0\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") " pod="openstack/ceilometer-0" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.344505 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-772x6\" (UniqueName: \"kubernetes.io/projected/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-kube-api-access-772x6\") pod \"ceilometer-0\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") " pod="openstack/ceilometer-0" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.352892 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") " pod="openstack/ceilometer-0" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.439450 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.935414 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:41:59 crc kubenswrapper[4857]: I1128 13:41:59.985352 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6e97fad4-9072-4c1e-be9a-7d389ffa65a6","Type":"ContainerStarted","Data":"651d49a82571a51dde022f119b5b01a28457c8a4cd17ea420c77253c48d376aa"} Nov 28 13:42:00 crc kubenswrapper[4857]: I1128 13:42:00.322142 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c39da9c-3096-4b52-98d9-a0fa0044aaab" path="/var/lib/kubelet/pods/4c39da9c-3096-4b52-98d9-a0fa0044aaab/volumes" Nov 28 13:42:00 crc kubenswrapper[4857]: I1128 13:42:00.997937 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6e97fad4-9072-4c1e-be9a-7d389ffa65a6","Type":"ContainerStarted","Data":"d4d56c8b40253bdff3b78dfce7375baff03acd16e1eb3619dcaaa0189a673b80"} Nov 28 13:42:03 crc kubenswrapper[4857]: I1128 13:42:03.038102 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6e97fad4-9072-4c1e-be9a-7d389ffa65a6","Type":"ContainerStarted","Data":"643f34bc4429cf21c2ddd5f217c5547e99769b91406851c62581dbab5a7adacd"} Nov 28 13:42:03 crc kubenswrapper[4857]: I1128 13:42:03.038610 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6e97fad4-9072-4c1e-be9a-7d389ffa65a6","Type":"ContainerStarted","Data":"a0b5c3e794b6a0afd8261726b3d10df95022d96335579198212a7bd02551fb56"} Nov 28 13:42:05 crc kubenswrapper[4857]: I1128 13:42:05.067545 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6e97fad4-9072-4c1e-be9a-7d389ffa65a6","Type":"ContainerStarted","Data":"8ec0ce83b0516015af9602e6302492334cf6e42a39f1188e6b5f8521cd6ccac5"} Nov 28 13:42:05 crc kubenswrapper[4857]: I1128 13:42:05.068659 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 13:42:05 crc kubenswrapper[4857]: I1128 13:42:05.102583 4857 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.670889831 podStartE2EDuration="6.102527574s" podCreationTimestamp="2025-11-28 13:41:59 +0000 UTC" firstStartedPulling="2025-11-28 13:41:59.949679906 +0000 UTC m=+1411.977055073" lastFinishedPulling="2025-11-28 13:42:04.381317649 +0000 UTC m=+1416.408692816" observedRunningTime="2025-11-28 13:42:05.094864234 +0000 UTC m=+1417.122239401" watchObservedRunningTime="2025-11-28 13:42:05.102527574 +0000 UTC m=+1417.129902741" Nov 28 13:42:09 crc kubenswrapper[4857]: I1128 13:42:09.121821 4857 generic.go:334] "Generic (PLEG): container finished" podID="f43bd8e8-a5d3-4575-894a-8df5746b831d" containerID="c8b37b179bae98a24592b60f7c52271b9cd46845ca9bc9a901e30860b3b46753" exitCode=0 Nov 28 13:42:09 crc kubenswrapper[4857]: I1128 13:42:09.121895 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-fb46r" event={"ID":"f43bd8e8-a5d3-4575-894a-8df5746b831d","Type":"ContainerDied","Data":"c8b37b179bae98a24592b60f7c52271b9cd46845ca9bc9a901e30860b3b46753"} Nov 28 13:42:10 crc kubenswrapper[4857]: I1128 13:42:10.513758 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-fb46r" Nov 28 13:42:10 crc kubenswrapper[4857]: I1128 13:42:10.669001 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f43bd8e8-a5d3-4575-894a-8df5746b831d-config-data\") pod \"f43bd8e8-a5d3-4575-894a-8df5746b831d\" (UID: \"f43bd8e8-a5d3-4575-894a-8df5746b831d\") " Nov 28 13:42:10 crc kubenswrapper[4857]: I1128 13:42:10.669043 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f43bd8e8-a5d3-4575-894a-8df5746b831d-scripts\") pod \"f43bd8e8-a5d3-4575-894a-8df5746b831d\" (UID: \"f43bd8e8-a5d3-4575-894a-8df5746b831d\") " Nov 28 13:42:10 crc kubenswrapper[4857]: I1128 13:42:10.669068 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zcb5r\" (UniqueName: \"kubernetes.io/projected/f43bd8e8-a5d3-4575-894a-8df5746b831d-kube-api-access-zcb5r\") pod \"f43bd8e8-a5d3-4575-894a-8df5746b831d\" (UID: \"f43bd8e8-a5d3-4575-894a-8df5746b831d\") " Nov 28 13:42:10 crc kubenswrapper[4857]: I1128 13:42:10.669226 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f43bd8e8-a5d3-4575-894a-8df5746b831d-combined-ca-bundle\") pod \"f43bd8e8-a5d3-4575-894a-8df5746b831d\" (UID: \"f43bd8e8-a5d3-4575-894a-8df5746b831d\") " Nov 28 13:42:10 crc kubenswrapper[4857]: I1128 13:42:10.675011 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f43bd8e8-a5d3-4575-894a-8df5746b831d-kube-api-access-zcb5r" (OuterVolumeSpecName: "kube-api-access-zcb5r") pod "f43bd8e8-a5d3-4575-894a-8df5746b831d" (UID: "f43bd8e8-a5d3-4575-894a-8df5746b831d"). InnerVolumeSpecName "kube-api-access-zcb5r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:42:10 crc kubenswrapper[4857]: I1128 13:42:10.677898 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f43bd8e8-a5d3-4575-894a-8df5746b831d-scripts" (OuterVolumeSpecName: "scripts") pod "f43bd8e8-a5d3-4575-894a-8df5746b831d" (UID: "f43bd8e8-a5d3-4575-894a-8df5746b831d"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:42:10 crc kubenswrapper[4857]: I1128 13:42:10.703679 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f43bd8e8-a5d3-4575-894a-8df5746b831d-config-data" (OuterVolumeSpecName: "config-data") pod "f43bd8e8-a5d3-4575-894a-8df5746b831d" (UID: "f43bd8e8-a5d3-4575-894a-8df5746b831d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:42:10 crc kubenswrapper[4857]: I1128 13:42:10.704437 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f43bd8e8-a5d3-4575-894a-8df5746b831d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f43bd8e8-a5d3-4575-894a-8df5746b831d" (UID: "f43bd8e8-a5d3-4575-894a-8df5746b831d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:42:10 crc kubenswrapper[4857]: I1128 13:42:10.771785 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f43bd8e8-a5d3-4575-894a-8df5746b831d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:42:10 crc kubenswrapper[4857]: I1128 13:42:10.771815 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f43bd8e8-a5d3-4575-894a-8df5746b831d-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:42:10 crc kubenswrapper[4857]: I1128 13:42:10.771828 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zcb5r\" (UniqueName: \"kubernetes.io/projected/f43bd8e8-a5d3-4575-894a-8df5746b831d-kube-api-access-zcb5r\") on node \"crc\" DevicePath \"\"" Nov 28 13:42:10 crc kubenswrapper[4857]: I1128 13:42:10.771841 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f43bd8e8-a5d3-4575-894a-8df5746b831d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:42:11 crc kubenswrapper[4857]: I1128 13:42:11.144167 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-fb46r" event={"ID":"f43bd8e8-a5d3-4575-894a-8df5746b831d","Type":"ContainerDied","Data":"e40b8fecab6070f09900fff3ddeac6fd38d114509c94d460e7619db2e162670d"} Nov 28 13:42:11 crc kubenswrapper[4857]: I1128 13:42:11.144223 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e40b8fecab6070f09900fff3ddeac6fd38d114509c94d460e7619db2e162670d" Nov 28 13:42:11 crc kubenswrapper[4857]: I1128 13:42:11.144261 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-fb46r" Nov 28 13:42:11 crc kubenswrapper[4857]: I1128 13:42:11.275182 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 13:42:11 crc kubenswrapper[4857]: E1128 13:42:11.275530 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f43bd8e8-a5d3-4575-894a-8df5746b831d" containerName="nova-cell0-conductor-db-sync" Nov 28 13:42:11 crc kubenswrapper[4857]: I1128 13:42:11.275546 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f43bd8e8-a5d3-4575-894a-8df5746b831d" containerName="nova-cell0-conductor-db-sync" Nov 28 13:42:11 crc kubenswrapper[4857]: I1128 13:42:11.275741 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f43bd8e8-a5d3-4575-894a-8df5746b831d" containerName="nova-cell0-conductor-db-sync" Nov 28 13:42:11 crc kubenswrapper[4857]: I1128 13:42:11.276347 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 13:42:11 crc kubenswrapper[4857]: I1128 13:42:11.282306 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 28 13:42:11 crc kubenswrapper[4857]: I1128 13:42:11.282633 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-pv849" Nov 28 13:42:11 crc kubenswrapper[4857]: I1128 13:42:11.296417 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 13:42:11 crc kubenswrapper[4857]: I1128 13:42:11.380835 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd\") " pod="openstack/nova-cell0-conductor-0" Nov 28 13:42:11 crc kubenswrapper[4857]: I1128 13:42:11.380898 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd\") " pod="openstack/nova-cell0-conductor-0" Nov 28 13:42:11 crc kubenswrapper[4857]: I1128 13:42:11.381000 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t5xfn\" (UniqueName: \"kubernetes.io/projected/2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd-kube-api-access-t5xfn\") pod \"nova-cell0-conductor-0\" (UID: \"2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd\") " pod="openstack/nova-cell0-conductor-0" Nov 28 13:42:11 crc kubenswrapper[4857]: I1128 13:42:11.482899 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd\") " pod="openstack/nova-cell0-conductor-0" Nov 28 13:42:11 crc kubenswrapper[4857]: I1128 13:42:11.482991 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t5xfn\" (UniqueName: \"kubernetes.io/projected/2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd-kube-api-access-t5xfn\") pod \"nova-cell0-conductor-0\" (UID: \"2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd\") " pod="openstack/nova-cell0-conductor-0" Nov 28 13:42:11 crc 
kubenswrapper[4857]: I1128 13:42:11.483125 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd\") " pod="openstack/nova-cell0-conductor-0" Nov 28 13:42:11 crc kubenswrapper[4857]: I1128 13:42:11.488654 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd\") " pod="openstack/nova-cell0-conductor-0" Nov 28 13:42:11 crc kubenswrapper[4857]: I1128 13:42:11.488713 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd\") " pod="openstack/nova-cell0-conductor-0" Nov 28 13:42:11 crc kubenswrapper[4857]: I1128 13:42:11.503253 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t5xfn\" (UniqueName: \"kubernetes.io/projected/2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd-kube-api-access-t5xfn\") pod \"nova-cell0-conductor-0\" (UID: \"2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd\") " pod="openstack/nova-cell0-conductor-0" Nov 28 13:42:11 crc kubenswrapper[4857]: I1128 13:42:11.594241 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 13:42:12 crc kubenswrapper[4857]: I1128 13:42:12.058424 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 13:42:12 crc kubenswrapper[4857]: I1128 13:42:12.171164 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd","Type":"ContainerStarted","Data":"a811bf4cbc223421dafda56d2fed783117ed79187cf2caf13350b1a08142c53f"} Nov 28 13:42:14 crc kubenswrapper[4857]: I1128 13:42:14.194717 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd","Type":"ContainerStarted","Data":"165d4ed1ed3f722b3e2c4f5769e7c056c9eafbeeed14d27d5233ab4f09a4ef4d"} Nov 28 13:42:14 crc kubenswrapper[4857]: I1128 13:42:14.195480 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 28 13:42:14 crc kubenswrapper[4857]: I1128 13:42:14.216537 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=3.2165162609999998 podStartE2EDuration="3.216516261s" podCreationTimestamp="2025-11-28 13:42:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:42:14.210509988 +0000 UTC m=+1426.237885175" watchObservedRunningTime="2025-11-28 13:42:14.216516261 +0000 UTC m=+1426.243891428" Nov 28 13:42:21 crc kubenswrapper[4857]: I1128 13:42:21.628729 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.081787 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-fcj6h"] Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 
13:42:22.083110 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-fcj6h" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.086454 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.087052 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.094464 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-fcj6h"] Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.190122 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54d85556-ca88-4cbe-9aab-b5505d75d5ed-scripts\") pod \"nova-cell0-cell-mapping-fcj6h\" (UID: \"54d85556-ca88-4cbe-9aab-b5505d75d5ed\") " pod="openstack/nova-cell0-cell-mapping-fcj6h" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.194867 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r478q\" (UniqueName: \"kubernetes.io/projected/54d85556-ca88-4cbe-9aab-b5505d75d5ed-kube-api-access-r478q\") pod \"nova-cell0-cell-mapping-fcj6h\" (UID: \"54d85556-ca88-4cbe-9aab-b5505d75d5ed\") " pod="openstack/nova-cell0-cell-mapping-fcj6h" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.194971 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54d85556-ca88-4cbe-9aab-b5505d75d5ed-config-data\") pod \"nova-cell0-cell-mapping-fcj6h\" (UID: \"54d85556-ca88-4cbe-9aab-b5505d75d5ed\") " pod="openstack/nova-cell0-cell-mapping-fcj6h" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.195109 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54d85556-ca88-4cbe-9aab-b5505d75d5ed-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-fcj6h\" (UID: \"54d85556-ca88-4cbe-9aab-b5505d75d5ed\") " pod="openstack/nova-cell0-cell-mapping-fcj6h" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.279336 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.280562 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.283919 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.296833 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54d85556-ca88-4cbe-9aab-b5505d75d5ed-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-fcj6h\" (UID: \"54d85556-ca88-4cbe-9aab-b5505d75d5ed\") " pod="openstack/nova-cell0-cell-mapping-fcj6h" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.296904 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54d85556-ca88-4cbe-9aab-b5505d75d5ed-scripts\") pod \"nova-cell0-cell-mapping-fcj6h\" (UID: \"54d85556-ca88-4cbe-9aab-b5505d75d5ed\") " pod="openstack/nova-cell0-cell-mapping-fcj6h" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.296959 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r478q\" (UniqueName: \"kubernetes.io/projected/54d85556-ca88-4cbe-9aab-b5505d75d5ed-kube-api-access-r478q\") pod \"nova-cell0-cell-mapping-fcj6h\" (UID: \"54d85556-ca88-4cbe-9aab-b5505d75d5ed\") " pod="openstack/nova-cell0-cell-mapping-fcj6h" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.297001 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54d85556-ca88-4cbe-9aab-b5505d75d5ed-config-data\") pod \"nova-cell0-cell-mapping-fcj6h\" (UID: \"54d85556-ca88-4cbe-9aab-b5505d75d5ed\") " pod="openstack/nova-cell0-cell-mapping-fcj6h" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.304696 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54d85556-ca88-4cbe-9aab-b5505d75d5ed-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-fcj6h\" (UID: \"54d85556-ca88-4cbe-9aab-b5505d75d5ed\") " pod="openstack/nova-cell0-cell-mapping-fcj6h" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.307354 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54d85556-ca88-4cbe-9aab-b5505d75d5ed-scripts\") pod \"nova-cell0-cell-mapping-fcj6h\" (UID: \"54d85556-ca88-4cbe-9aab-b5505d75d5ed\") " pod="openstack/nova-cell0-cell-mapping-fcj6h" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.315643 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54d85556-ca88-4cbe-9aab-b5505d75d5ed-config-data\") pod \"nova-cell0-cell-mapping-fcj6h\" (UID: \"54d85556-ca88-4cbe-9aab-b5505d75d5ed\") " pod="openstack/nova-cell0-cell-mapping-fcj6h" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.337525 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.351373 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.359411 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.365500 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r478q\" (UniqueName: \"kubernetes.io/projected/54d85556-ca88-4cbe-9aab-b5505d75d5ed-kube-api-access-r478q\") pod \"nova-cell0-cell-mapping-fcj6h\" (UID: \"54d85556-ca88-4cbe-9aab-b5505d75d5ed\") " pod="openstack/nova-cell0-cell-mapping-fcj6h" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.365932 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.408004 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-fcj6h" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.409448 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4a0c992-d1d1-496c-b6fe-f947484ba378-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b4a0c992-d1d1-496c-b6fe-f947484ba378\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.409539 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4a0c992-d1d1-496c-b6fe-f947484ba378-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b4a0c992-d1d1-496c-b6fe-f947484ba378\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.409615 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rkvbl\" (UniqueName: \"kubernetes.io/projected/b4a0c992-d1d1-496c-b6fe-f947484ba378-kube-api-access-rkvbl\") pod \"nova-cell1-novncproxy-0\" (UID: \"b4a0c992-d1d1-496c-b6fe-f947484ba378\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.481954 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.483479 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.489120 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.512728 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rkvbl\" (UniqueName: \"kubernetes.io/projected/b4a0c992-d1d1-496c-b6fe-f947484ba378-kube-api-access-rkvbl\") pod \"nova-cell1-novncproxy-0\" (UID: \"b4a0c992-d1d1-496c-b6fe-f947484ba378\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.512832 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4a0c992-d1d1-496c-b6fe-f947484ba378-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b4a0c992-d1d1-496c-b6fe-f947484ba378\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.512878 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drltg\" (UniqueName: \"kubernetes.io/projected/5d25d4c7-4988-4baf-8b11-8b0c9be42e23-kube-api-access-drltg\") pod \"nova-scheduler-0\" (UID: \"5d25d4c7-4988-4baf-8b11-8b0c9be42e23\") " pod="openstack/nova-scheduler-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.512910 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d25d4c7-4988-4baf-8b11-8b0c9be42e23-config-data\") pod \"nova-scheduler-0\" (UID: \"5d25d4c7-4988-4baf-8b11-8b0c9be42e23\") " pod="openstack/nova-scheduler-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.512936 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4a0c992-d1d1-496c-b6fe-f947484ba378-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b4a0c992-d1d1-496c-b6fe-f947484ba378\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.512977 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d25d4c7-4988-4baf-8b11-8b0c9be42e23-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"5d25d4c7-4988-4baf-8b11-8b0c9be42e23\") " pod="openstack/nova-scheduler-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.517983 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.519831 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4a0c992-d1d1-496c-b6fe-f947484ba378-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b4a0c992-d1d1-496c-b6fe-f947484ba378\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.520402 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4a0c992-d1d1-496c-b6fe-f947484ba378-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b4a0c992-d1d1-496c-b6fe-f947484ba378\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.566488 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-rkvbl\" (UniqueName: \"kubernetes.io/projected/b4a0c992-d1d1-496c-b6fe-f947484ba378-kube-api-access-rkvbl\") pod \"nova-cell1-novncproxy-0\" (UID: \"b4a0c992-d1d1-496c-b6fe-f947484ba378\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.583284 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.598453 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.615555 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62339922-009a-4a67-93bc-095aa84f945e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"62339922-009a-4a67-93bc-095aa84f945e\") " pod="openstack/nova-api-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.615642 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drltg\" (UniqueName: \"kubernetes.io/projected/5d25d4c7-4988-4baf-8b11-8b0c9be42e23-kube-api-access-drltg\") pod \"nova-scheduler-0\" (UID: \"5d25d4c7-4988-4baf-8b11-8b0c9be42e23\") " pod="openstack/nova-scheduler-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.615672 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4cszx\" (UniqueName: \"kubernetes.io/projected/62339922-009a-4a67-93bc-095aa84f945e-kube-api-access-4cszx\") pod \"nova-api-0\" (UID: \"62339922-009a-4a67-93bc-095aa84f945e\") " pod="openstack/nova-api-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.615694 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d25d4c7-4988-4baf-8b11-8b0c9be42e23-config-data\") pod \"nova-scheduler-0\" (UID: \"5d25d4c7-4988-4baf-8b11-8b0c9be42e23\") " pod="openstack/nova-scheduler-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.615720 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/62339922-009a-4a67-93bc-095aa84f945e-logs\") pod \"nova-api-0\" (UID: \"62339922-009a-4a67-93bc-095aa84f945e\") " pod="openstack/nova-api-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.615736 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62339922-009a-4a67-93bc-095aa84f945e-config-data\") pod \"nova-api-0\" (UID: \"62339922-009a-4a67-93bc-095aa84f945e\") " pod="openstack/nova-api-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.615788 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d25d4c7-4988-4baf-8b11-8b0c9be42e23-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"5d25d4c7-4988-4baf-8b11-8b0c9be42e23\") " pod="openstack/nova-scheduler-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.626571 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d25d4c7-4988-4baf-8b11-8b0c9be42e23-config-data\") pod \"nova-scheduler-0\" (UID: \"5d25d4c7-4988-4baf-8b11-8b0c9be42e23\") " pod="openstack/nova-scheduler-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 
13:42:22.626599 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d25d4c7-4988-4baf-8b11-8b0c9be42e23-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"5d25d4c7-4988-4baf-8b11-8b0c9be42e23\") " pod="openstack/nova-scheduler-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.666814 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.668531 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.678570 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.685429 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drltg\" (UniqueName: \"kubernetes.io/projected/5d25d4c7-4988-4baf-8b11-8b0c9be42e23-kube-api-access-drltg\") pod \"nova-scheduler-0\" (UID: \"5d25d4c7-4988-4baf-8b11-8b0c9be42e23\") " pod="openstack/nova-scheduler-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.708229 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.717028 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62339922-009a-4a67-93bc-095aa84f945e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"62339922-009a-4a67-93bc-095aa84f945e\") " pod="openstack/nova-api-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.717193 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4cszx\" (UniqueName: \"kubernetes.io/projected/62339922-009a-4a67-93bc-095aa84f945e-kube-api-access-4cszx\") pod \"nova-api-0\" (UID: \"62339922-009a-4a67-93bc-095aa84f945e\") " pod="openstack/nova-api-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.717295 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/62339922-009a-4a67-93bc-095aa84f945e-logs\") pod \"nova-api-0\" (UID: \"62339922-009a-4a67-93bc-095aa84f945e\") " pod="openstack/nova-api-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.717454 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62339922-009a-4a67-93bc-095aa84f945e-config-data\") pod \"nova-api-0\" (UID: \"62339922-009a-4a67-93bc-095aa84f945e\") " pod="openstack/nova-api-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.719736 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/62339922-009a-4a67-93bc-095aa84f945e-logs\") pod \"nova-api-0\" (UID: \"62339922-009a-4a67-93bc-095aa84f945e\") " pod="openstack/nova-api-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.722323 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62339922-009a-4a67-93bc-095aa84f945e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"62339922-009a-4a67-93bc-095aa84f945e\") " pod="openstack/nova-api-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.723278 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/62339922-009a-4a67-93bc-095aa84f945e-config-data\") pod \"nova-api-0\" (UID: \"62339922-009a-4a67-93bc-095aa84f945e\") " pod="openstack/nova-api-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.779479 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4cszx\" (UniqueName: \"kubernetes.io/projected/62339922-009a-4a67-93bc-095aa84f945e-kube-api-access-4cszx\") pod \"nova-api-0\" (UID: \"62339922-009a-4a67-93bc-095aa84f945e\") " pod="openstack/nova-api-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.824885 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28483fe1-41dc-4ca6-a93f-e176242cc717-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"28483fe1-41dc-4ca6-a93f-e176242cc717\") " pod="openstack/nova-metadata-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.825277 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2zqb\" (UniqueName: \"kubernetes.io/projected/28483fe1-41dc-4ca6-a93f-e176242cc717-kube-api-access-d2zqb\") pod \"nova-metadata-0\" (UID: \"28483fe1-41dc-4ca6-a93f-e176242cc717\") " pod="openstack/nova-metadata-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.825357 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/28483fe1-41dc-4ca6-a93f-e176242cc717-logs\") pod \"nova-metadata-0\" (UID: \"28483fe1-41dc-4ca6-a93f-e176242cc717\") " pod="openstack/nova-metadata-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.825422 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28483fe1-41dc-4ca6-a93f-e176242cc717-config-data\") pod \"nova-metadata-0\" (UID: \"28483fe1-41dc-4ca6-a93f-e176242cc717\") " pod="openstack/nova-metadata-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.837240 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-4pk9b"] Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.838995 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-4pk9b" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.861331 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-4pk9b"] Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.894921 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.918255 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.928056 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28483fe1-41dc-4ca6-a93f-e176242cc717-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"28483fe1-41dc-4ca6-a93f-e176242cc717\") " pod="openstack/nova-metadata-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.928158 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2zqb\" (UniqueName: \"kubernetes.io/projected/28483fe1-41dc-4ca6-a93f-e176242cc717-kube-api-access-d2zqb\") pod \"nova-metadata-0\" (UID: \"28483fe1-41dc-4ca6-a93f-e176242cc717\") " pod="openstack/nova-metadata-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.928273 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-4pk9b\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " pod="openstack/dnsmasq-dns-bccf8f775-4pk9b" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.928312 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-config\") pod \"dnsmasq-dns-bccf8f775-4pk9b\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " pod="openstack/dnsmasq-dns-bccf8f775-4pk9b" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.928340 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-dns-svc\") pod \"dnsmasq-dns-bccf8f775-4pk9b\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " pod="openstack/dnsmasq-dns-bccf8f775-4pk9b" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.928378 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-4pk9b\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " pod="openstack/dnsmasq-dns-bccf8f775-4pk9b" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.928436 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/28483fe1-41dc-4ca6-a93f-e176242cc717-logs\") pod \"nova-metadata-0\" (UID: \"28483fe1-41dc-4ca6-a93f-e176242cc717\") " pod="openstack/nova-metadata-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.928507 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28483fe1-41dc-4ca6-a93f-e176242cc717-config-data\") pod \"nova-metadata-0\" (UID: \"28483fe1-41dc-4ca6-a93f-e176242cc717\") " pod="openstack/nova-metadata-0" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.928548 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-4pk9b\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " pod="openstack/dnsmasq-dns-bccf8f775-4pk9b" Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 
13:42:22.928574 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jl5pp\" (UniqueName: \"kubernetes.io/projected/78b07ef1-c929-45b8-b3e2-f0370c174054-kube-api-access-jl5pp\") pod \"dnsmasq-dns-bccf8f775-4pk9b\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " pod="openstack/dnsmasq-dns-bccf8f775-4pk9b"
Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.929111 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/28483fe1-41dc-4ca6-a93f-e176242cc717-logs\") pod \"nova-metadata-0\" (UID: \"28483fe1-41dc-4ca6-a93f-e176242cc717\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.934370 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28483fe1-41dc-4ca6-a93f-e176242cc717-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"28483fe1-41dc-4ca6-a93f-e176242cc717\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.938706 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28483fe1-41dc-4ca6-a93f-e176242cc717-config-data\") pod \"nova-metadata-0\" (UID: \"28483fe1-41dc-4ca6-a93f-e176242cc717\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:22 crc kubenswrapper[4857]: I1128 13:42:22.955149 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2zqb\" (UniqueName: \"kubernetes.io/projected/28483fe1-41dc-4ca6-a93f-e176242cc717-kube-api-access-d2zqb\") pod \"nova-metadata-0\" (UID: \"28483fe1-41dc-4ca6-a93f-e176242cc717\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.014349 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.034561 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-4pk9b\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " pod="openstack/dnsmasq-dns-bccf8f775-4pk9b"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.034616 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-config\") pod \"dnsmasq-dns-bccf8f775-4pk9b\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " pod="openstack/dnsmasq-dns-bccf8f775-4pk9b"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.034643 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-dns-svc\") pod \"dnsmasq-dns-bccf8f775-4pk9b\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " pod="openstack/dnsmasq-dns-bccf8f775-4pk9b"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.034678 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-4pk9b\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " pod="openstack/dnsmasq-dns-bccf8f775-4pk9b"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.034768 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-4pk9b\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " pod="openstack/dnsmasq-dns-bccf8f775-4pk9b"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.035078 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jl5pp\" (UniqueName: \"kubernetes.io/projected/78b07ef1-c929-45b8-b3e2-f0370c174054-kube-api-access-jl5pp\") pod \"dnsmasq-dns-bccf8f775-4pk9b\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " pod="openstack/dnsmasq-dns-bccf8f775-4pk9b"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.035396 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-4pk9b\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " pod="openstack/dnsmasq-dns-bccf8f775-4pk9b"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.041316 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-4pk9b\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " pod="openstack/dnsmasq-dns-bccf8f775-4pk9b"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.042002 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-config\") pod \"dnsmasq-dns-bccf8f775-4pk9b\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " pod="openstack/dnsmasq-dns-bccf8f775-4pk9b"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.044008 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-4pk9b\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " pod="openstack/dnsmasq-dns-bccf8f775-4pk9b"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.044175 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-dns-svc\") pod \"dnsmasq-dns-bccf8f775-4pk9b\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " pod="openstack/dnsmasq-dns-bccf8f775-4pk9b"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.062396 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jl5pp\" (UniqueName: \"kubernetes.io/projected/78b07ef1-c929-45b8-b3e2-f0370c174054-kube-api-access-jl5pp\") pod \"dnsmasq-dns-bccf8f775-4pk9b\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " pod="openstack/dnsmasq-dns-bccf8f775-4pk9b"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.184864 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-4pk9b"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.263166 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.318247 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b4a0c992-d1d1-496c-b6fe-f947484ba378","Type":"ContainerStarted","Data":"443d8dafa74ebd81bace6f1faceff97cf53d31791849aebf67f6986ecb53529f"}
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.330908 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-fcj6h"]
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.385605 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-rhznt"]
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.387595 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-rhznt"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.395248 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-rhznt"]
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.409301 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.409565 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.498030 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.507003 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.628250 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2x87\" (UniqueName: \"kubernetes.io/projected/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-kube-api-access-k2x87\") pod \"nova-cell1-conductor-db-sync-rhznt\" (UID: \"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7\") " pod="openstack/nova-cell1-conductor-db-sync-rhznt"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.628373 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-config-data\") pod \"nova-cell1-conductor-db-sync-rhznt\" (UID: \"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7\") " pod="openstack/nova-cell1-conductor-db-sync-rhznt"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.628398 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-scripts\") pod \"nova-cell1-conductor-db-sync-rhznt\" (UID: \"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7\") " pod="openstack/nova-cell1-conductor-db-sync-rhznt"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.628428 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-rhznt\" (UID: \"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7\") " pod="openstack/nova-cell1-conductor-db-sync-rhznt"
Nov 28 13:42:23 crc kubenswrapper[4857]: W1128 13:42:23.699268 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod28483fe1_41dc_4ca6_a93f_e176242cc717.slice/crio-a60db9357ab240cd84e4fe87f7d2d36ad95d7c0a1a4608a409cb71b18613ac0b WatchSource:0}: Error finding container a60db9357ab240cd84e4fe87f7d2d36ad95d7c0a1a4608a409cb71b18613ac0b: Status 404 returned error can't find the container with id a60db9357ab240cd84e4fe87f7d2d36ad95d7c0a1a4608a409cb71b18613ac0b
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.703152 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.730806 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-config-data\") pod \"nova-cell1-conductor-db-sync-rhznt\" (UID: \"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7\") " pod="openstack/nova-cell1-conductor-db-sync-rhznt"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.731229 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-scripts\") pod \"nova-cell1-conductor-db-sync-rhznt\" (UID: \"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7\") " pod="openstack/nova-cell1-conductor-db-sync-rhznt"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.731291 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-rhznt\" (UID: \"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7\") " pod="openstack/nova-cell1-conductor-db-sync-rhznt"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.731353 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2x87\" (UniqueName: \"kubernetes.io/projected/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-kube-api-access-k2x87\") pod \"nova-cell1-conductor-db-sync-rhznt\" (UID: \"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7\") " pod="openstack/nova-cell1-conductor-db-sync-rhznt"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.737246 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-scripts\") pod \"nova-cell1-conductor-db-sync-rhznt\" (UID: \"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7\") " pod="openstack/nova-cell1-conductor-db-sync-rhznt"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.739382 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-rhznt\" (UID: \"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7\") " pod="openstack/nova-cell1-conductor-db-sync-rhznt"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.741917 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-config-data\") pod \"nova-cell1-conductor-db-sync-rhznt\" (UID: \"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7\") " pod="openstack/nova-cell1-conductor-db-sync-rhznt"
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.746337 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-4pk9b"]
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.748258 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2x87\" (UniqueName: \"kubernetes.io/projected/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-kube-api-access-k2x87\") pod \"nova-cell1-conductor-db-sync-rhznt\" (UID: \"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7\") " pod="openstack/nova-cell1-conductor-db-sync-rhznt"
Nov 28 13:42:23 crc kubenswrapper[4857]: W1128 13:42:23.757027 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod78b07ef1_c929_45b8_b3e2_f0370c174054.slice/crio-724cb9f647b1a9a098415a89d864d320a373bb3708cf69805af30d05332651c1 WatchSource:0}: Error finding container 724cb9f647b1a9a098415a89d864d320a373bb3708cf69805af30d05332651c1: Status 404 returned error can't find the container with id 724cb9f647b1a9a098415a89d864d320a373bb3708cf69805af30d05332651c1
Nov 28 13:42:23 crc kubenswrapper[4857]: I1128 13:42:23.955035 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-rhznt"
Nov 28 13:42:24 crc kubenswrapper[4857]: I1128 13:42:24.338588 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-fcj6h" event={"ID":"54d85556-ca88-4cbe-9aab-b5505d75d5ed","Type":"ContainerStarted","Data":"776ea66e8f82f77c0554d725c1d8158421a9ae7c37c383b7b900b848ca6121a5"}
Nov 28 13:42:24 crc kubenswrapper[4857]: I1128 13:42:24.338950 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-fcj6h" event={"ID":"54d85556-ca88-4cbe-9aab-b5505d75d5ed","Type":"ContainerStarted","Data":"39c075b6082dbdb060a2e6a7a0460badf0db1ce9061ab29bc74d4e8ad170bb63"}
Nov 28 13:42:24 crc kubenswrapper[4857]: I1128 13:42:24.349546 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"5d25d4c7-4988-4baf-8b11-8b0c9be42e23","Type":"ContainerStarted","Data":"61da513c7c915ce89659370805c3344c94a6d46e98b850a1dc455b8855b68b00"}
Nov 28 13:42:24 crc kubenswrapper[4857]: I1128 13:42:24.353352 4857 generic.go:334] "Generic (PLEG): container finished" podID="78b07ef1-c929-45b8-b3e2-f0370c174054" containerID="a43911419dc796745dcba6232a2a82896bc82cd1ff6a08213d38500e17c66c95" exitCode=0
Nov 28 13:42:24 crc kubenswrapper[4857]: I1128 13:42:24.353422 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-4pk9b" event={"ID":"78b07ef1-c929-45b8-b3e2-f0370c174054","Type":"ContainerDied","Data":"a43911419dc796745dcba6232a2a82896bc82cd1ff6a08213d38500e17c66c95"}
Nov 28 13:42:24 crc kubenswrapper[4857]: I1128 13:42:24.353451 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-4pk9b" event={"ID":"78b07ef1-c929-45b8-b3e2-f0370c174054","Type":"ContainerStarted","Data":"724cb9f647b1a9a098415a89d864d320a373bb3708cf69805af30d05332651c1"}
Nov 28 13:42:24 crc kubenswrapper[4857]: I1128 13:42:24.361329 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"28483fe1-41dc-4ca6-a93f-e176242cc717","Type":"ContainerStarted","Data":"a60db9357ab240cd84e4fe87f7d2d36ad95d7c0a1a4608a409cb71b18613ac0b"}
Nov 28 13:42:24 crc kubenswrapper[4857]: I1128 13:42:24.365046 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"62339922-009a-4a67-93bc-095aa84f945e","Type":"ContainerStarted","Data":"112130084e56af62e372d7a4969902439f4c5555d7fc52fbfe4be6f0dcf56a5f"}
Nov 28 13:42:24 crc kubenswrapper[4857]: I1128 13:42:24.371929 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-fcj6h" podStartSLOduration=2.371904635 podStartE2EDuration="2.371904635s" podCreationTimestamp="2025-11-28 13:42:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:42:24.357514131 +0000 UTC m=+1436.384889318" watchObservedRunningTime="2025-11-28 13:42:24.371904635 +0000 UTC m=+1436.399279802"
Nov 28 13:42:24 crc kubenswrapper[4857]: I1128 13:42:24.460069 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-rhznt"]
Nov 28 13:42:24 crc kubenswrapper[4857]: W1128 13:42:24.470897 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7b35e465_15a4_4f5a_a53a_2fb23b2edeb7.slice/crio-78c54f1464a9165f30c39959007fb6e38bb0007d895ca5432935316832aac98a WatchSource:0}: Error finding container 78c54f1464a9165f30c39959007fb6e38bb0007d895ca5432935316832aac98a: Status 404 returned error can't find the container with id 78c54f1464a9165f30c39959007fb6e38bb0007d895ca5432935316832aac98a
Nov 28 13:42:25 crc kubenswrapper[4857]: I1128 13:42:25.389956 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-4pk9b" event={"ID":"78b07ef1-c929-45b8-b3e2-f0370c174054","Type":"ContainerStarted","Data":"63eb95ec2e69722a2f6c90db8f9801bde76c0087e6b2a2cbcd28e91a38b5eb2d"}
Nov 28 13:42:25 crc kubenswrapper[4857]: I1128 13:42:25.391290 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bccf8f775-4pk9b"
Nov 28 13:42:25 crc kubenswrapper[4857]: I1128 13:42:25.399568 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-rhznt" event={"ID":"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7","Type":"ContainerStarted","Data":"f002b9da5d304a77fca31416dd0c123765cbee7f0c7495a3f41715d72eeaf82b"}
Nov 28 13:42:25 crc kubenswrapper[4857]: I1128 13:42:25.399605 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-rhznt" event={"ID":"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7","Type":"ContainerStarted","Data":"78c54f1464a9165f30c39959007fb6e38bb0007d895ca5432935316832aac98a"}
Nov 28 13:42:25 crc kubenswrapper[4857]: I1128 13:42:25.450826 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bccf8f775-4pk9b" podStartSLOduration=3.450805252 podStartE2EDuration="3.450805252s" podCreationTimestamp="2025-11-28 13:42:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:42:25.447554438 +0000 UTC m=+1437.474929605" watchObservedRunningTime="2025-11-28 13:42:25.450805252 +0000 UTC m=+1437.478180449"
Nov 28 13:42:26 crc kubenswrapper[4857]: I1128 13:42:26.482937 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-rhznt" podStartSLOduration=3.48289796 podStartE2EDuration="3.48289796s" podCreationTimestamp="2025-11-28 13:42:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:42:25.476153951 +0000 UTC m=+1437.503529118" watchObservedRunningTime="2025-11-28 13:42:26.48289796 +0000 UTC m=+1438.510273127"
Nov 28 13:42:26 crc kubenswrapper[4857]: I1128 13:42:26.498439 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 13:42:26 crc kubenswrapper[4857]: I1128 13:42:26.511977 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 13:42:28 crc kubenswrapper[4857]: I1128 13:42:28.446570 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"62339922-009a-4a67-93bc-095aa84f945e","Type":"ContainerStarted","Data":"46d8c5620e55bae59dcdadf7ccf2b686ec5ae9dae41652130b871690631948fe"}
Nov 28 13:42:28 crc kubenswrapper[4857]: I1128 13:42:28.455836 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"5d25d4c7-4988-4baf-8b11-8b0c9be42e23","Type":"ContainerStarted","Data":"5efe0f809c40cd40a2ee8b6470ce7ff4ad7de3631f52cff7dd89555f97e66d87"}
Nov 28 13:42:28 crc kubenswrapper[4857]: I1128 13:42:28.465063 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"28483fe1-41dc-4ca6-a93f-e176242cc717","Type":"ContainerStarted","Data":"d7f33cecda758901fe6458d61ab180bf778cc3ee27fa8aff247b4eee28844373"}
Nov 28 13:42:28 crc kubenswrapper[4857]: I1128 13:42:28.466680 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b4a0c992-d1d1-496c-b6fe-f947484ba378","Type":"ContainerStarted","Data":"e091ae395c36daff41d1d0b042a64c8153752f9277a45b6dbbcb03b48c613724"}
Nov 28 13:42:28 crc kubenswrapper[4857]: I1128 13:42:28.466826 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="b4a0c992-d1d1-496c-b6fe-f947484ba378" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://e091ae395c36daff41d1d0b042a64c8153752f9277a45b6dbbcb03b48c613724" gracePeriod=30
Nov 28 13:42:28 crc kubenswrapper[4857]: I1128 13:42:28.494999 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.138704536 podStartE2EDuration="6.494982502s" podCreationTimestamp="2025-11-28 13:42:22 +0000 UTC" firstStartedPulling="2025-11-28 13:42:23.269682727 +0000 UTC m=+1435.297057894" lastFinishedPulling="2025-11-28 13:42:27.625960693 +0000 UTC m=+1439.653335860" observedRunningTime="2025-11-28 13:42:28.494278691 +0000 UTC m=+1440.521653868" watchObservedRunningTime="2025-11-28 13:42:28.494982502 +0000 UTC m=+1440.522357669"
Nov 28 13:42:28 crc kubenswrapper[4857]: I1128 13:42:28.517337 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.507819604 podStartE2EDuration="6.517316864s" podCreationTimestamp="2025-11-28 13:42:22 +0000 UTC" firstStartedPulling="2025-11-28 13:42:23.652548461 +0000 UTC m=+1435.679923628" lastFinishedPulling="2025-11-28 13:42:27.662045681 +0000 UTC m=+1439.689420888" observedRunningTime="2025-11-28 13:42:28.507182743 +0000 UTC m=+1440.534557910" watchObservedRunningTime="2025-11-28 13:42:28.517316864 +0000 UTC m=+1440.544692031"
Nov 28 13:42:29 crc kubenswrapper[4857]: I1128 13:42:29.452304 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Nov 28 13:42:29 crc kubenswrapper[4857]: I1128 13:42:29.481658 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="28483fe1-41dc-4ca6-a93f-e176242cc717" containerName="nova-metadata-log" containerID="cri-o://d7f33cecda758901fe6458d61ab180bf778cc3ee27fa8aff247b4eee28844373" gracePeriod=30
Nov 28 13:42:29 crc kubenswrapper[4857]: I1128 13:42:29.481716 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="28483fe1-41dc-4ca6-a93f-e176242cc717" containerName="nova-metadata-metadata" containerID="cri-o://9e25c4f53b4e7182d733841f0ddd3781859a2e751529d26f975377d79e203327" gracePeriod=30
Nov 28 13:42:29 crc kubenswrapper[4857]: I1128 13:42:29.482100 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"28483fe1-41dc-4ca6-a93f-e176242cc717","Type":"ContainerStarted","Data":"9e25c4f53b4e7182d733841f0ddd3781859a2e751529d26f975377d79e203327"}
Nov 28 13:42:29 crc kubenswrapper[4857]: I1128 13:42:29.490693 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"62339922-009a-4a67-93bc-095aa84f945e","Type":"ContainerStarted","Data":"97b041f6bed24fcb2db50548f37540c8d4d9885f3ee5e1847404436e539c880d"}
Nov 28 13:42:29 crc kubenswrapper[4857]: I1128 13:42:29.523541 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.5688895670000003 podStartE2EDuration="7.523513339s" podCreationTimestamp="2025-11-28 13:42:22 +0000 UTC" firstStartedPulling="2025-11-28 13:42:23.707671846 +0000 UTC m=+1435.735047013" lastFinishedPulling="2025-11-28 13:42:27.662295618 +0000 UTC m=+1439.689670785" observedRunningTime="2025-11-28 13:42:29.508445905 +0000 UTC m=+1441.535821082" watchObservedRunningTime="2025-11-28 13:42:29.523513339 +0000 UTC m=+1441.550888526"
Nov 28 13:42:29 crc kubenswrapper[4857]: I1128 13:42:29.532584 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.369222363 podStartE2EDuration="7.532564549s" podCreationTimestamp="2025-11-28 13:42:22 +0000 UTC" firstStartedPulling="2025-11-28 13:42:23.504234204 +0000 UTC m=+1435.531609371" lastFinishedPulling="2025-11-28 13:42:27.66757639 +0000 UTC m=+1439.694951557" observedRunningTime="2025-11-28 13:42:29.528304077 +0000 UTC m=+1441.555679254" watchObservedRunningTime="2025-11-28 13:42:29.532564549 +0000 UTC m=+1441.559939736"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.077559 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.189061 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/28483fe1-41dc-4ca6-a93f-e176242cc717-logs\") pod \"28483fe1-41dc-4ca6-a93f-e176242cc717\" (UID: \"28483fe1-41dc-4ca6-a93f-e176242cc717\") "
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.189169 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28483fe1-41dc-4ca6-a93f-e176242cc717-combined-ca-bundle\") pod \"28483fe1-41dc-4ca6-a93f-e176242cc717\" (UID: \"28483fe1-41dc-4ca6-a93f-e176242cc717\") "
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.189266 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d2zqb\" (UniqueName: \"kubernetes.io/projected/28483fe1-41dc-4ca6-a93f-e176242cc717-kube-api-access-d2zqb\") pod \"28483fe1-41dc-4ca6-a93f-e176242cc717\" (UID: \"28483fe1-41dc-4ca6-a93f-e176242cc717\") "
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.189305 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28483fe1-41dc-4ca6-a93f-e176242cc717-config-data\") pod \"28483fe1-41dc-4ca6-a93f-e176242cc717\" (UID: \"28483fe1-41dc-4ca6-a93f-e176242cc717\") "
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.190904 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28483fe1-41dc-4ca6-a93f-e176242cc717-logs" (OuterVolumeSpecName: "logs") pod "28483fe1-41dc-4ca6-a93f-e176242cc717" (UID: "28483fe1-41dc-4ca6-a93f-e176242cc717"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.205568 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28483fe1-41dc-4ca6-a93f-e176242cc717-kube-api-access-d2zqb" (OuterVolumeSpecName: "kube-api-access-d2zqb") pod "28483fe1-41dc-4ca6-a93f-e176242cc717" (UID: "28483fe1-41dc-4ca6-a93f-e176242cc717"). InnerVolumeSpecName "kube-api-access-d2zqb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.221022 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28483fe1-41dc-4ca6-a93f-e176242cc717-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "28483fe1-41dc-4ca6-a93f-e176242cc717" (UID: "28483fe1-41dc-4ca6-a93f-e176242cc717"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.233268 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28483fe1-41dc-4ca6-a93f-e176242cc717-config-data" (OuterVolumeSpecName: "config-data") pod "28483fe1-41dc-4ca6-a93f-e176242cc717" (UID: "28483fe1-41dc-4ca6-a93f-e176242cc717"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.294122 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/28483fe1-41dc-4ca6-a93f-e176242cc717-logs\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.294393 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28483fe1-41dc-4ca6-a93f-e176242cc717-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.294510 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d2zqb\" (UniqueName: \"kubernetes.io/projected/28483fe1-41dc-4ca6-a93f-e176242cc717-kube-api-access-d2zqb\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.294640 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28483fe1-41dc-4ca6-a93f-e176242cc717-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.499861 4857 generic.go:334] "Generic (PLEG): container finished" podID="28483fe1-41dc-4ca6-a93f-e176242cc717" containerID="9e25c4f53b4e7182d733841f0ddd3781859a2e751529d26f975377d79e203327" exitCode=0
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.499889 4857 generic.go:334] "Generic (PLEG): container finished" podID="28483fe1-41dc-4ca6-a93f-e176242cc717" containerID="d7f33cecda758901fe6458d61ab180bf778cc3ee27fa8aff247b4eee28844373" exitCode=143
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.500693 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.501137 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"28483fe1-41dc-4ca6-a93f-e176242cc717","Type":"ContainerDied","Data":"9e25c4f53b4e7182d733841f0ddd3781859a2e751529d26f975377d79e203327"}
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.501159 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"28483fe1-41dc-4ca6-a93f-e176242cc717","Type":"ContainerDied","Data":"d7f33cecda758901fe6458d61ab180bf778cc3ee27fa8aff247b4eee28844373"}
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.501170 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"28483fe1-41dc-4ca6-a93f-e176242cc717","Type":"ContainerDied","Data":"a60db9357ab240cd84e4fe87f7d2d36ad95d7c0a1a4608a409cb71b18613ac0b"}
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.501185 4857 scope.go:117] "RemoveContainer" containerID="9e25c4f53b4e7182d733841f0ddd3781859a2e751529d26f975377d79e203327"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.553383 4857 scope.go:117] "RemoveContainer" containerID="d7f33cecda758901fe6458d61ab180bf778cc3ee27fa8aff247b4eee28844373"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.553516 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.603187 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.603440 4857 scope.go:117] "RemoveContainer" containerID="9e25c4f53b4e7182d733841f0ddd3781859a2e751529d26f975377d79e203327"
Nov 28 13:42:30 crc kubenswrapper[4857]: E1128 13:42:30.603932 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e25c4f53b4e7182d733841f0ddd3781859a2e751529d26f975377d79e203327\": container with ID starting with 9e25c4f53b4e7182d733841f0ddd3781859a2e751529d26f975377d79e203327 not found: ID does not exist" containerID="9e25c4f53b4e7182d733841f0ddd3781859a2e751529d26f975377d79e203327"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.603971 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e25c4f53b4e7182d733841f0ddd3781859a2e751529d26f975377d79e203327"} err="failed to get container status \"9e25c4f53b4e7182d733841f0ddd3781859a2e751529d26f975377d79e203327\": rpc error: code = NotFound desc = could not find container \"9e25c4f53b4e7182d733841f0ddd3781859a2e751529d26f975377d79e203327\": container with ID starting with 9e25c4f53b4e7182d733841f0ddd3781859a2e751529d26f975377d79e203327 not found: ID does not exist"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.603993 4857 scope.go:117] "RemoveContainer" containerID="d7f33cecda758901fe6458d61ab180bf778cc3ee27fa8aff247b4eee28844373"
Nov 28 13:42:30 crc kubenswrapper[4857]: E1128 13:42:30.604304 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7f33cecda758901fe6458d61ab180bf778cc3ee27fa8aff247b4eee28844373\": container with ID starting with d7f33cecda758901fe6458d61ab180bf778cc3ee27fa8aff247b4eee28844373 not found: ID does not exist" containerID="d7f33cecda758901fe6458d61ab180bf778cc3ee27fa8aff247b4eee28844373"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.604326 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7f33cecda758901fe6458d61ab180bf778cc3ee27fa8aff247b4eee28844373"} err="failed to get container status \"d7f33cecda758901fe6458d61ab180bf778cc3ee27fa8aff247b4eee28844373\": rpc error: code = NotFound desc = could not find container \"d7f33cecda758901fe6458d61ab180bf778cc3ee27fa8aff247b4eee28844373\": container with ID starting with d7f33cecda758901fe6458d61ab180bf778cc3ee27fa8aff247b4eee28844373 not found: ID does not exist"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.604338 4857 scope.go:117] "RemoveContainer" containerID="9e25c4f53b4e7182d733841f0ddd3781859a2e751529d26f975377d79e203327"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.605341 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e25c4f53b4e7182d733841f0ddd3781859a2e751529d26f975377d79e203327"} err="failed to get container status \"9e25c4f53b4e7182d733841f0ddd3781859a2e751529d26f975377d79e203327\": rpc error: code = NotFound desc = could not find container \"9e25c4f53b4e7182d733841f0ddd3781859a2e751529d26f975377d79e203327\": container with ID starting with 9e25c4f53b4e7182d733841f0ddd3781859a2e751529d26f975377d79e203327 not found: ID does not exist"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.605362 4857 scope.go:117] "RemoveContainer" containerID="d7f33cecda758901fe6458d61ab180bf778cc3ee27fa8aff247b4eee28844373"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.606005 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7f33cecda758901fe6458d61ab180bf778cc3ee27fa8aff247b4eee28844373"} err="failed to get container status \"d7f33cecda758901fe6458d61ab180bf778cc3ee27fa8aff247b4eee28844373\": rpc error: code = NotFound desc = could not find container \"d7f33cecda758901fe6458d61ab180bf778cc3ee27fa8aff247b4eee28844373\": container with ID starting with d7f33cecda758901fe6458d61ab180bf778cc3ee27fa8aff247b4eee28844373 not found: ID does not exist"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.615822 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 13:42:30 crc kubenswrapper[4857]: E1128 13:42:30.616302 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28483fe1-41dc-4ca6-a93f-e176242cc717" containerName="nova-metadata-metadata"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.616372 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="28483fe1-41dc-4ca6-a93f-e176242cc717" containerName="nova-metadata-metadata"
Nov 28 13:42:30 crc kubenswrapper[4857]: E1128 13:42:30.616387 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28483fe1-41dc-4ca6-a93f-e176242cc717" containerName="nova-metadata-log"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.616394 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="28483fe1-41dc-4ca6-a93f-e176242cc717" containerName="nova-metadata-log"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.616609 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="28483fe1-41dc-4ca6-a93f-e176242cc717" containerName="nova-metadata-metadata"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.616680 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="28483fe1-41dc-4ca6-a93f-e176242cc717" containerName="nova-metadata-log"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.618040 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.620940 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.621116 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.626059 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.701053 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-logs\") pod \"nova-metadata-0\" (UID: \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.701102 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-config-data\") pod \"nova-metadata-0\" (UID: \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.701130 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.701217 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5s6r\" (UniqueName: \"kubernetes.io/projected/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-kube-api-access-g5s6r\") pod \"nova-metadata-0\" (UID: \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.701255 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.802888 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5s6r\" (UniqueName: \"kubernetes.io/projected/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-kube-api-access-g5s6r\") pod \"nova-metadata-0\" (UID: \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.802976 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.803051 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-logs\") pod \"nova-metadata-0\" (UID: \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.803094 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-config-data\") pod \"nova-metadata-0\" (UID: \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.803125 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.803779 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-logs\") pod \"nova-metadata-0\" (UID: \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.807698 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.807741 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.812548 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-config-data\") pod \"nova-metadata-0\" (UID: \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.818125 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5s6r\" (UniqueName: \"kubernetes.io/projected/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-kube-api-access-g5s6r\") pod \"nova-metadata-0\" (UID: \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:30 crc kubenswrapper[4857]: I1128 13:42:30.938348 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 13:42:31 crc kubenswrapper[4857]: I1128 13:42:31.395428 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 13:42:31 crc kubenswrapper[4857]: I1128 13:42:31.508820 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8","Type":"ContainerStarted","Data":"04075a31ce8357422970b1386450c4eea398b8595e5741f059d4b7379d81c0ad"}
Nov 28 13:42:31 crc kubenswrapper[4857]: I1128 13:42:31.511650 4857 generic.go:334] "Generic (PLEG): container finished" podID="54d85556-ca88-4cbe-9aab-b5505d75d5ed" containerID="776ea66e8f82f77c0554d725c1d8158421a9ae7c37c383b7b900b848ca6121a5" exitCode=0
Nov 28 13:42:31 crc kubenswrapper[4857]: I1128 13:42:31.511704 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-fcj6h" event={"ID":"54d85556-ca88-4cbe-9aab-b5505d75d5ed","Type":"ContainerDied","Data":"776ea66e8f82f77c0554d725c1d8158421a9ae7c37c383b7b900b848ca6121a5"}
Nov 28 13:42:32 crc kubenswrapper[4857]: I1128 13:42:32.319286 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28483fe1-41dc-4ca6-a93f-e176242cc717" path="/var/lib/kubelet/pods/28483fe1-41dc-4ca6-a93f-e176242cc717/volumes"
Nov 28 13:42:32 crc kubenswrapper[4857]: I1128 13:42:32.522567 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8","Type":"ContainerStarted","Data":"51ae3fdd989f88a236a7ff685ff34cd4f0a81ef9ab0cf8bc227be57da6b1b277"}
Nov 28 13:42:32 crc kubenswrapper[4857]: I1128 13:42:32.522617 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8","Type":"ContainerStarted","Data":"7b74b7597b5eb8e52fbe272b8429115837ba831567575574502aaffd9d886080"}
Nov 28 13:42:32 crc kubenswrapper[4857]: I1128 13:42:32.550454 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.550434862 podStartE2EDuration="2.550434862s" podCreationTimestamp="2025-11-28 13:42:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:42:32.546248782 +0000 UTC m=+1444.573623949" watchObservedRunningTime="2025-11-28 13:42:32.550434862 +0000 UTC m=+1444.577810039"
Nov 28 13:42:32 crc kubenswrapper[4857]: I1128 13:42:32.599710 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:42:32 crc kubenswrapper[4857]: I1128 13:42:32.896463 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 28 13:42:32 crc kubenswrapper[4857]: I1128 13:42:32.896897 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 28 13:42:32 crc kubenswrapper[4857]: I1128 13:42:32.909161 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-fcj6h"
Nov 28 13:42:32 crc kubenswrapper[4857]: I1128 13:42:32.921024 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 28 13:42:32 crc kubenswrapper[4857]: I1128 13:42:32.921061 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 28 13:42:32 crc kubenswrapper[4857]: I1128 13:42:32.933898 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.048107 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r478q\" (UniqueName: \"kubernetes.io/projected/54d85556-ca88-4cbe-9aab-b5505d75d5ed-kube-api-access-r478q\") pod \"54d85556-ca88-4cbe-9aab-b5505d75d5ed\" (UID: \"54d85556-ca88-4cbe-9aab-b5505d75d5ed\") "
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.048327 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54d85556-ca88-4cbe-9aab-b5505d75d5ed-scripts\") pod \"54d85556-ca88-4cbe-9aab-b5505d75d5ed\" (UID: \"54d85556-ca88-4cbe-9aab-b5505d75d5ed\") "
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.048432 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54d85556-ca88-4cbe-9aab-b5505d75d5ed-config-data\") pod \"54d85556-ca88-4cbe-9aab-b5505d75d5ed\" (UID: \"54d85556-ca88-4cbe-9aab-b5505d75d5ed\") "
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.048465 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54d85556-ca88-4cbe-9aab-b5505d75d5ed-combined-ca-bundle\") pod \"54d85556-ca88-4cbe-9aab-b5505d75d5ed\" (UID: \"54d85556-ca88-4cbe-9aab-b5505d75d5ed\") "
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.055430 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54d85556-ca88-4cbe-9aab-b5505d75d5ed-kube-api-access-r478q" (OuterVolumeSpecName: "kube-api-access-r478q") pod "54d85556-ca88-4cbe-9aab-b5505d75d5ed" (UID: "54d85556-ca88-4cbe-9aab-b5505d75d5ed"). InnerVolumeSpecName "kube-api-access-r478q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.055546 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54d85556-ca88-4cbe-9aab-b5505d75d5ed-scripts" (OuterVolumeSpecName: "scripts") pod "54d85556-ca88-4cbe-9aab-b5505d75d5ed" (UID: "54d85556-ca88-4cbe-9aab-b5505d75d5ed"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.083998 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54d85556-ca88-4cbe-9aab-b5505d75d5ed-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "54d85556-ca88-4cbe-9aab-b5505d75d5ed" (UID: "54d85556-ca88-4cbe-9aab-b5505d75d5ed"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.088931 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54d85556-ca88-4cbe-9aab-b5505d75d5ed-config-data" (OuterVolumeSpecName: "config-data") pod "54d85556-ca88-4cbe-9aab-b5505d75d5ed" (UID: "54d85556-ca88-4cbe-9aab-b5505d75d5ed"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.150933 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54d85556-ca88-4cbe-9aab-b5505d75d5ed-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.150965 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54d85556-ca88-4cbe-9aab-b5505d75d5ed-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.150977 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54d85556-ca88-4cbe-9aab-b5505d75d5ed-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.150992 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r478q\" (UniqueName: \"kubernetes.io/projected/54d85556-ca88-4cbe-9aab-b5505d75d5ed-kube-api-access-r478q\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.186899 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bccf8f775-4pk9b"
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.266013 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-gcnr8"]
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.266237 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" podUID="a6e77d16-341a-4f84-8427-82712eb6541f" containerName="dnsmasq-dns" containerID="cri-o://aee9ed79f9bfca814a082362b7a417713d91125d8fa079c602bc4e8a91039739" gracePeriod=10
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.427004 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" podUID="a6e77d16-341a-4f84-8427-82712eb6541f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.159:5353: connect: connection refused"
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.534068 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-fcj6h" event={"ID":"54d85556-ca88-4cbe-9aab-b5505d75d5ed","Type":"ContainerDied","Data":"39c075b6082dbdb060a2e6a7a0460badf0db1ce9061ab29bc74d4e8ad170bb63"}
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.534123 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39c075b6082dbdb060a2e6a7a0460badf0db1ce9061ab29bc74d4e8ad170bb63"
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.534194 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-fcj6h"
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.590216 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.744564 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.744830 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="62339922-009a-4a67-93bc-095aa84f945e" containerName="nova-api-log" containerID="cri-o://46d8c5620e55bae59dcdadf7ccf2b686ec5ae9dae41652130b871690631948fe" gracePeriod=30
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.744931 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="62339922-009a-4a67-93bc-095aa84f945e" containerName="nova-api-api" containerID="cri-o://97b041f6bed24fcb2db50548f37540c8d4d9885f3ee5e1847404436e539c880d" gracePeriod=30
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.751867 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="62339922-009a-4a67-93bc-095aa84f945e" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.184:8774/\": EOF"
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.751892 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="62339922-009a-4a67-93bc-095aa84f945e" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.184:8774/\": EOF"
Nov 28 13:42:33 crc kubenswrapper[4857]: I1128 13:42:33.774480 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.037352 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.037909 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="db66469a-ca4a-4f4b-b657-70bf41cd45db" containerName="kube-state-metrics" containerID="cri-o://df790fb9ecc64103ecf13f34ef5d893a9319a46e50680cc017a6bb17fda529ce" gracePeriod=30
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.078908 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.396215 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-gcnr8"
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.597949 4857 generic.go:334] "Generic (PLEG): container finished" podID="62339922-009a-4a67-93bc-095aa84f945e" containerID="46d8c5620e55bae59dcdadf7ccf2b686ec5ae9dae41652130b871690631948fe" exitCode=143
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.598032 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"62339922-009a-4a67-93bc-095aa84f945e","Type":"ContainerDied","Data":"46d8c5620e55bae59dcdadf7ccf2b686ec5ae9dae41652130b871690631948fe"}
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.609237 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-config\") pod \"a6e77d16-341a-4f84-8427-82712eb6541f\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") "
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.609282 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-dns-swift-storage-0\") pod \"a6e77d16-341a-4f84-8427-82712eb6541f\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") "
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.609362 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-ovsdbserver-sb\") pod \"a6e77d16-341a-4f84-8427-82712eb6541f\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") "
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.609427 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-dns-svc\") pod \"a6e77d16-341a-4f84-8427-82712eb6541f\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") "
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.609504 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-ovsdbserver-nb\") pod \"a6e77d16-341a-4f84-8427-82712eb6541f\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") "
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.609560 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-snc64\" (UniqueName: \"kubernetes.io/projected/a6e77d16-341a-4f84-8427-82712eb6541f-kube-api-access-snc64\") pod \"a6e77d16-341a-4f84-8427-82712eb6541f\" (UID: \"a6e77d16-341a-4f84-8427-82712eb6541f\") "
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.620387 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6e77d16-341a-4f84-8427-82712eb6541f-kube-api-access-snc64" (OuterVolumeSpecName: "kube-api-access-snc64") pod "a6e77d16-341a-4f84-8427-82712eb6541f" (UID: "a6e77d16-341a-4f84-8427-82712eb6541f"). InnerVolumeSpecName "kube-api-access-snc64". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.621662 4857 generic.go:334] "Generic (PLEG): container finished" podID="a6e77d16-341a-4f84-8427-82712eb6541f" containerID="aee9ed79f9bfca814a082362b7a417713d91125d8fa079c602bc4e8a91039739" exitCode=0
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.621726 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" event={"ID":"a6e77d16-341a-4f84-8427-82712eb6541f","Type":"ContainerDied","Data":"aee9ed79f9bfca814a082362b7a417713d91125d8fa079c602bc4e8a91039739"}
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.621777 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-gcnr8" event={"ID":"a6e77d16-341a-4f84-8427-82712eb6541f","Type":"ContainerDied","Data":"3d7c5a95cc17b61e5779079b5ed8f66cbc86adaac85d0b25a34560b3785518f8"}
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.621800 4857 scope.go:117] "RemoveContainer" containerID="aee9ed79f9bfca814a082362b7a417713d91125d8fa079c602bc4e8a91039739"
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.621944 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-gcnr8"
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.630812 4857 generic.go:334] "Generic (PLEG): container finished" podID="db66469a-ca4a-4f4b-b657-70bf41cd45db" containerID="df790fb9ecc64103ecf13f34ef5d893a9319a46e50680cc017a6bb17fda529ce" exitCode=2
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.631058 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8" containerName="nova-metadata-log" containerID="cri-o://7b74b7597b5eb8e52fbe272b8429115837ba831567575574502aaffd9d886080" gracePeriod=30
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.631169 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"db66469a-ca4a-4f4b-b657-70bf41cd45db","Type":"ContainerDied","Data":"df790fb9ecc64103ecf13f34ef5d893a9319a46e50680cc017a6bb17fda529ce"}
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.631953 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8" containerName="nova-metadata-metadata" containerID="cri-o://51ae3fdd989f88a236a7ff685ff34cd4f0a81ef9ab0cf8bc227be57da6b1b277" gracePeriod=30
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.678548 4857 scope.go:117] "RemoveContainer" containerID="407addd6ca088094b297b7f7f920655b1a8c2631adf913b9cc4c9d7a693417a5"
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.710497 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.712729 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-snc64\" (UniqueName: \"kubernetes.io/projected/a6e77d16-341a-4f84-8427-82712eb6541f-kube-api-access-snc64\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.717873 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a6e77d16-341a-4f84-8427-82712eb6541f" (UID: "a6e77d16-341a-4f84-8427-82712eb6541f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.739104 4857 scope.go:117] "RemoveContainer" containerID="aee9ed79f9bfca814a082362b7a417713d91125d8fa079c602bc4e8a91039739"
Nov 28 13:42:34 crc kubenswrapper[4857]: E1128 13:42:34.745710 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aee9ed79f9bfca814a082362b7a417713d91125d8fa079c602bc4e8a91039739\": container with ID starting with aee9ed79f9bfca814a082362b7a417713d91125d8fa079c602bc4e8a91039739 not found: ID does not exist" containerID="aee9ed79f9bfca814a082362b7a417713d91125d8fa079c602bc4e8a91039739"
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.745763 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aee9ed79f9bfca814a082362b7a417713d91125d8fa079c602bc4e8a91039739"} err="failed to get container status \"aee9ed79f9bfca814a082362b7a417713d91125d8fa079c602bc4e8a91039739\": rpc error: code = NotFound desc = could not find container \"aee9ed79f9bfca814a082362b7a417713d91125d8fa079c602bc4e8a91039739\": container with ID starting with aee9ed79f9bfca814a082362b7a417713d91125d8fa079c602bc4e8a91039739 not found: ID does not exist"
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.745787 4857 scope.go:117] "RemoveContainer" containerID="407addd6ca088094b297b7f7f920655b1a8c2631adf913b9cc4c9d7a693417a5"
Nov 28 13:42:34 crc kubenswrapper[4857]: E1128 13:42:34.751785 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"407addd6ca088094b297b7f7f920655b1a8c2631adf913b9cc4c9d7a693417a5\": container with ID starting with 407addd6ca088094b297b7f7f920655b1a8c2631adf913b9cc4c9d7a693417a5 not found: ID does not exist" containerID="407addd6ca088094b297b7f7f920655b1a8c2631adf913b9cc4c9d7a693417a5"
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.751830 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"407addd6ca088094b297b7f7f920655b1a8c2631adf913b9cc4c9d7a693417a5"} err="failed to get container status \"407addd6ca088094b297b7f7f920655b1a8c2631adf913b9cc4c9d7a693417a5\": rpc error: code = NotFound desc = could not find container \"407addd6ca088094b297b7f7f920655b1a8c2631adf913b9cc4c9d7a693417a5\": container with ID starting with 407addd6ca088094b297b7f7f920655b1a8c2631adf913b9cc4c9d7a693417a5 not found: ID does not exist"
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.758160 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a6e77d16-341a-4f84-8427-82712eb6541f" (UID: "a6e77d16-341a-4f84-8427-82712eb6541f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.758677 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-config" (OuterVolumeSpecName: "config") pod "a6e77d16-341a-4f84-8427-82712eb6541f" (UID: "a6e77d16-341a-4f84-8427-82712eb6541f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.763533 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a6e77d16-341a-4f84-8427-82712eb6541f" (UID: "a6e77d16-341a-4f84-8427-82712eb6541f"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.771631 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a6e77d16-341a-4f84-8427-82712eb6541f" (UID: "a6e77d16-341a-4f84-8427-82712eb6541f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.813712 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bvtbt\" (UniqueName: \"kubernetes.io/projected/db66469a-ca4a-4f4b-b657-70bf41cd45db-kube-api-access-bvtbt\") pod \"db66469a-ca4a-4f4b-b657-70bf41cd45db\" (UID: \"db66469a-ca4a-4f4b-b657-70bf41cd45db\") "
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.814306 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-config\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.814326 4857 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.814336 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.814345 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.814354 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6e77d16-341a-4f84-8427-82712eb6541f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.821033 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db66469a-ca4a-4f4b-b657-70bf41cd45db-kube-api-access-bvtbt" (OuterVolumeSpecName: "kube-api-access-bvtbt") pod "db66469a-ca4a-4f4b-b657-70bf41cd45db" (UID: "db66469a-ca4a-4f4b-b657-70bf41cd45db"). InnerVolumeSpecName "kube-api-access-bvtbt".
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.916014 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bvtbt\" (UniqueName: \"kubernetes.io/projected/db66469a-ca4a-4f4b-b657-70bf41cd45db-kube-api-access-bvtbt\") on node \"crc\" DevicePath \"\"" Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.959430 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-gcnr8"] Nov 28 13:42:34 crc kubenswrapper[4857]: I1128 13:42:34.971092 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-gcnr8"] Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.244136 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.423996 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-combined-ca-bundle\") pod \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\" (UID: \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\") " Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.424046 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-config-data\") pod \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\" (UID: \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\") " Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.424207 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-logs\") pod \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\" (UID: \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\") " Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.424242 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5s6r\" (UniqueName: \"kubernetes.io/projected/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-kube-api-access-g5s6r\") pod \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\" (UID: \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\") " Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.424273 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-nova-metadata-tls-certs\") pod \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\" (UID: \"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8\") " Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.424496 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-logs" (OuterVolumeSpecName: "logs") pod "d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8" (UID: "d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.425190 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.427552 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-kube-api-access-g5s6r" (OuterVolumeSpecName: "kube-api-access-g5s6r") pod "d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8" (UID: "d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8"). InnerVolumeSpecName "kube-api-access-g5s6r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.452872 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8" (UID: "d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.454162 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-config-data" (OuterVolumeSpecName: "config-data") pod "d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8" (UID: "d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.500034 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8" (UID: "d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.527392 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.527445 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.527457 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5s6r\" (UniqueName: \"kubernetes.io/projected/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-kube-api-access-g5s6r\") on node \"crc\" DevicePath \"\"" Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.527467 4857 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.640605 4857 generic.go:334] "Generic (PLEG): container finished" podID="d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8" containerID="51ae3fdd989f88a236a7ff685ff34cd4f0a81ef9ab0cf8bc227be57da6b1b277" exitCode=0 Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.641712 4857 generic.go:334] "Generic (PLEG): container finished" podID="d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8" containerID="7b74b7597b5eb8e52fbe272b8429115837ba831567575574502aaffd9d886080" exitCode=143 Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.640677 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.640665 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8","Type":"ContainerDied","Data":"51ae3fdd989f88a236a7ff685ff34cd4f0a81ef9ab0cf8bc227be57da6b1b277"} Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.642031 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8","Type":"ContainerDied","Data":"7b74b7597b5eb8e52fbe272b8429115837ba831567575574502aaffd9d886080"} Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.642047 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8","Type":"ContainerDied","Data":"04075a31ce8357422970b1386450c4eea398b8595e5741f059d4b7379d81c0ad"} Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.642065 4857 scope.go:117] "RemoveContainer" containerID="51ae3fdd989f88a236a7ff685ff34cd4f0a81ef9ab0cf8bc227be57da6b1b277" Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.645010 4857 generic.go:334] "Generic (PLEG): container finished" podID="7b35e465-15a4-4f5a-a53a-2fb23b2edeb7" containerID="f002b9da5d304a77fca31416dd0c123765cbee7f0c7495a3f41715d72eeaf82b" exitCode=0 Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.645068 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-rhznt" event={"ID":"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7","Type":"ContainerDied","Data":"f002b9da5d304a77fca31416dd0c123765cbee7f0c7495a3f41715d72eeaf82b"} Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.650365 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="5d25d4c7-4988-4baf-8b11-8b0c9be42e23" containerName="nova-scheduler-scheduler" containerID="cri-o://5efe0f809c40cd40a2ee8b6470ce7ff4ad7de3631f52cff7dd89555f97e66d87" gracePeriod=30 Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.650601 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.651539 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"db66469a-ca4a-4f4b-b657-70bf41cd45db","Type":"ContainerDied","Data":"8e12486fb2e5fb082001f8616760880207aa13f08d8847c07943bbb3e886edb5"}
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.670808 4857 scope.go:117] "RemoveContainer" containerID="7b74b7597b5eb8e52fbe272b8429115837ba831567575574502aaffd9d886080"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.697420 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.701823 4857 scope.go:117] "RemoveContainer" containerID="51ae3fdd989f88a236a7ff685ff34cd4f0a81ef9ab0cf8bc227be57da6b1b277"
Nov 28 13:42:35 crc kubenswrapper[4857]: E1128 13:42:35.702199 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"51ae3fdd989f88a236a7ff685ff34cd4f0a81ef9ab0cf8bc227be57da6b1b277\": container with ID starting with 51ae3fdd989f88a236a7ff685ff34cd4f0a81ef9ab0cf8bc227be57da6b1b277 not found: ID does not exist" containerID="51ae3fdd989f88a236a7ff685ff34cd4f0a81ef9ab0cf8bc227be57da6b1b277"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.702291 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51ae3fdd989f88a236a7ff685ff34cd4f0a81ef9ab0cf8bc227be57da6b1b277"} err="failed to get container status \"51ae3fdd989f88a236a7ff685ff34cd4f0a81ef9ab0cf8bc227be57da6b1b277\": rpc error: code = NotFound desc = could not find container \"51ae3fdd989f88a236a7ff685ff34cd4f0a81ef9ab0cf8bc227be57da6b1b277\": container with ID starting with 51ae3fdd989f88a236a7ff685ff34cd4f0a81ef9ab0cf8bc227be57da6b1b277 not found: ID does not exist"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.702379 4857 scope.go:117] "RemoveContainer" containerID="7b74b7597b5eb8e52fbe272b8429115837ba831567575574502aaffd9d886080"
Nov 28 13:42:35 crc kubenswrapper[4857]: E1128 13:42:35.706304 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b74b7597b5eb8e52fbe272b8429115837ba831567575574502aaffd9d886080\": container with ID starting with 7b74b7597b5eb8e52fbe272b8429115837ba831567575574502aaffd9d886080 not found: ID does not exist" containerID="7b74b7597b5eb8e52fbe272b8429115837ba831567575574502aaffd9d886080"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.706418 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b74b7597b5eb8e52fbe272b8429115837ba831567575574502aaffd9d886080"} err="failed to get container status \"7b74b7597b5eb8e52fbe272b8429115837ba831567575574502aaffd9d886080\": rpc error: code = NotFound desc = could not find container \"7b74b7597b5eb8e52fbe272b8429115837ba831567575574502aaffd9d886080\": container with ID starting with 7b74b7597b5eb8e52fbe272b8429115837ba831567575574502aaffd9d886080 not found: ID does not exist"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.706519 4857 scope.go:117] "RemoveContainer" containerID="51ae3fdd989f88a236a7ff685ff34cd4f0a81ef9ab0cf8bc227be57da6b1b277"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.706872 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.707104 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51ae3fdd989f88a236a7ff685ff34cd4f0a81ef9ab0cf8bc227be57da6b1b277"} err="failed to get container status \"51ae3fdd989f88a236a7ff685ff34cd4f0a81ef9ab0cf8bc227be57da6b1b277\": rpc error: code = NotFound desc = could not find container \"51ae3fdd989f88a236a7ff685ff34cd4f0a81ef9ab0cf8bc227be57da6b1b277\": container with ID starting with 51ae3fdd989f88a236a7ff685ff34cd4f0a81ef9ab0cf8bc227be57da6b1b277 not found: ID does not exist"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.707186 4857 scope.go:117] "RemoveContainer" containerID="7b74b7597b5eb8e52fbe272b8429115837ba831567575574502aaffd9d886080"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.707612 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b74b7597b5eb8e52fbe272b8429115837ba831567575574502aaffd9d886080"} err="failed to get container status \"7b74b7597b5eb8e52fbe272b8429115837ba831567575574502aaffd9d886080\": rpc error: code = NotFound desc = could not find container \"7b74b7597b5eb8e52fbe272b8429115837ba831567575574502aaffd9d886080\": container with ID starting with 7b74b7597b5eb8e52fbe272b8429115837ba831567575574502aaffd9d886080 not found: ID does not exist"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.707696 4857 scope.go:117] "RemoveContainer" containerID="df790fb9ecc64103ecf13f34ef5d893a9319a46e50680cc017a6bb17fda529ce"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.723794 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 28 13:42:35 crc kubenswrapper[4857]: E1128 13:42:35.724428 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6e77d16-341a-4f84-8427-82712eb6541f" containerName="init"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.724534 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6e77d16-341a-4f84-8427-82712eb6541f" containerName="init"
Nov 28 13:42:35 crc kubenswrapper[4857]: E1128 13:42:35.724679 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6e77d16-341a-4f84-8427-82712eb6541f" containerName="dnsmasq-dns"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.724740 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6e77d16-341a-4f84-8427-82712eb6541f" containerName="dnsmasq-dns"
Nov 28 13:42:35 crc kubenswrapper[4857]: E1128 13:42:35.724818 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8" containerName="nova-metadata-metadata"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.724868 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8" containerName="nova-metadata-metadata"
Nov 28 13:42:35 crc kubenswrapper[4857]: E1128 13:42:35.724931 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db66469a-ca4a-4f4b-b657-70bf41cd45db" containerName="kube-state-metrics"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.724990 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="db66469a-ca4a-4f4b-b657-70bf41cd45db" containerName="kube-state-metrics"
Nov 28 13:42:35 crc kubenswrapper[4857]: E1128 13:42:35.725066 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54d85556-ca88-4cbe-9aab-b5505d75d5ed" containerName="nova-manage"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.725117 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="54d85556-ca88-4cbe-9aab-b5505d75d5ed" containerName="nova-manage"
Nov 28 13:42:35 crc kubenswrapper[4857]: E1128 13:42:35.725179 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8" containerName="nova-metadata-log"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.725230 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8" containerName="nova-metadata-log"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.725444 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6e77d16-341a-4f84-8427-82712eb6541f" containerName="dnsmasq-dns"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.725524 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="54d85556-ca88-4cbe-9aab-b5505d75d5ed" containerName="nova-manage"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.725593 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8" containerName="nova-metadata-log"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.725652 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="db66469a-ca4a-4f4b-b657-70bf41cd45db" containerName="kube-state-metrics"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.725712 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8" containerName="nova-metadata-metadata"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.726391 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.732188 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.732395 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.733781 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.751819 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.753604 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.765785 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.767220 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.771215 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.771453 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.779464 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.833519 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zpf68\" (UniqueName: \"kubernetes.io/projected/1fa6d725-8054-46f1-8c0c-c693d5306563-kube-api-access-zpf68\") pod \"kube-state-metrics-0\" (UID: \"1fa6d725-8054-46f1-8c0c-c693d5306563\") " pod="openstack/kube-state-metrics-0"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.833627 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fa6d725-8054-46f1-8c0c-c693d5306563-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"1fa6d725-8054-46f1-8c0c-c693d5306563\") " pod="openstack/kube-state-metrics-0"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.833699 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/1fa6d725-8054-46f1-8c0c-c693d5306563-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"1fa6d725-8054-46f1-8c0c-c693d5306563\") " pod="openstack/kube-state-metrics-0"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.833740 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fa6d725-8054-46f1-8c0c-c693d5306563-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"1fa6d725-8054-46f1-8c0c-c693d5306563\") " pod="openstack/kube-state-metrics-0"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.935588 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jbdt\" (UniqueName: \"kubernetes.io/projected/b57e1273-601c-4338-90bd-9047ae46b65a-kube-api-access-6jbdt\") pod \"nova-metadata-0\" (UID: \"b57e1273-601c-4338-90bd-9047ae46b65a\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.935923 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zpf68\" (UniqueName: \"kubernetes.io/projected/1fa6d725-8054-46f1-8c0c-c693d5306563-kube-api-access-zpf68\") pod \"kube-state-metrics-0\" (UID: \"1fa6d725-8054-46f1-8c0c-c693d5306563\") " pod="openstack/kube-state-metrics-0"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.936055 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fa6d725-8054-46f1-8c0c-c693d5306563-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"1fa6d725-8054-46f1-8c0c-c693d5306563\") " pod="openstack/kube-state-metrics-0"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.936238 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b57e1273-601c-4338-90bd-9047ae46b65a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b57e1273-601c-4338-90bd-9047ae46b65a\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.936348 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b57e1273-601c-4338-90bd-9047ae46b65a-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b57e1273-601c-4338-90bd-9047ae46b65a\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.936450 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/1fa6d725-8054-46f1-8c0c-c693d5306563-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"1fa6d725-8054-46f1-8c0c-c693d5306563\") " pod="openstack/kube-state-metrics-0"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.936570 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b57e1273-601c-4338-90bd-9047ae46b65a-config-data\") pod \"nova-metadata-0\" (UID: \"b57e1273-601c-4338-90bd-9047ae46b65a\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.936677 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fa6d725-8054-46f1-8c0c-c693d5306563-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"1fa6d725-8054-46f1-8c0c-c693d5306563\") " pod="openstack/kube-state-metrics-0"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.936836 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b57e1273-601c-4338-90bd-9047ae46b65a-logs\") pod \"nova-metadata-0\" (UID: \"b57e1273-601c-4338-90bd-9047ae46b65a\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.940348 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fa6d725-8054-46f1-8c0c-c693d5306563-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"1fa6d725-8054-46f1-8c0c-c693d5306563\") " pod="openstack/kube-state-metrics-0"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.940928 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/1fa6d725-8054-46f1-8c0c-c693d5306563-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"1fa6d725-8054-46f1-8c0c-c693d5306563\") " pod="openstack/kube-state-metrics-0"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.950248 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fa6d725-8054-46f1-8c0c-c693d5306563-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"1fa6d725-8054-46f1-8c0c-c693d5306563\") " pod="openstack/kube-state-metrics-0"
Nov 28 13:42:35 crc kubenswrapper[4857]: I1128 13:42:35.961192 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zpf68\" (UniqueName: \"kubernetes.io/projected/1fa6d725-8054-46f1-8c0c-c693d5306563-kube-api-access-zpf68\") pod \"kube-state-metrics-0\" (UID: \"1fa6d725-8054-46f1-8c0c-c693d5306563\") " pod="openstack/kube-state-metrics-0"
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.037686 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jbdt\" (UniqueName: \"kubernetes.io/projected/b57e1273-601c-4338-90bd-9047ae46b65a-kube-api-access-6jbdt\") pod \"nova-metadata-0\" (UID: \"b57e1273-601c-4338-90bd-9047ae46b65a\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.037833 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b57e1273-601c-4338-90bd-9047ae46b65a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b57e1273-601c-4338-90bd-9047ae46b65a\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.037860 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b57e1273-601c-4338-90bd-9047ae46b65a-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b57e1273-601c-4338-90bd-9047ae46b65a\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.037904 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b57e1273-601c-4338-90bd-9047ae46b65a-config-data\") pod \"nova-metadata-0\" (UID: \"b57e1273-601c-4338-90bd-9047ae46b65a\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.037966 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b57e1273-601c-4338-90bd-9047ae46b65a-logs\") pod \"nova-metadata-0\" (UID: \"b57e1273-601c-4338-90bd-9047ae46b65a\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.038578 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b57e1273-601c-4338-90bd-9047ae46b65a-logs\") pod \"nova-metadata-0\" (UID: \"b57e1273-601c-4338-90bd-9047ae46b65a\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.041895 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b57e1273-601c-4338-90bd-9047ae46b65a-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b57e1273-601c-4338-90bd-9047ae46b65a\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.043054 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b57e1273-601c-4338-90bd-9047ae46b65a-config-data\") pod \"nova-metadata-0\" (UID: \"b57e1273-601c-4338-90bd-9047ae46b65a\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.043665 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b57e1273-601c-4338-90bd-9047ae46b65a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b57e1273-601c-4338-90bd-9047ae46b65a\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.055925 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jbdt\" (UniqueName: \"kubernetes.io/projected/b57e1273-601c-4338-90bd-9047ae46b65a-kube-api-access-6jbdt\") pod \"nova-metadata-0\" (UID: \"b57e1273-601c-4338-90bd-9047ae46b65a\") " pod="openstack/nova-metadata-0"
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.066055 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.092626 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.324403 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6e77d16-341a-4f84-8427-82712eb6541f" path="/var/lib/kubelet/pods/a6e77d16-341a-4f84-8427-82712eb6541f/volumes"
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.325846 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8" path="/var/lib/kubelet/pods/d3787b8c-2ecd-4ce9-8dfc-853f3d3c6ec8/volumes"
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.326570 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db66469a-ca4a-4f4b-b657-70bf41cd45db" path="/var/lib/kubelet/pods/db66469a-ca4a-4f4b-b657-70bf41cd45db/volumes"
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.521927 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.522285 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6e97fad4-9072-4c1e-be9a-7d389ffa65a6" containerName="ceilometer-central-agent" containerID="cri-o://d4d56c8b40253bdff3b78dfce7375baff03acd16e1eb3619dcaaa0189a673b80" gracePeriod=30
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.522312 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6e97fad4-9072-4c1e-be9a-7d389ffa65a6" containerName="proxy-httpd" containerID="cri-o://8ec0ce83b0516015af9602e6302492334cf6e42a39f1188e6b5f8521cd6ccac5" gracePeriod=30
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.522367 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6e97fad4-9072-4c1e-be9a-7d389ffa65a6" containerName="sg-core" containerID="cri-o://643f34bc4429cf21c2ddd5f217c5547e99769b91406851c62581dbab5a7adacd" gracePeriod=30
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.522393 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6e97fad4-9072-4c1e-be9a-7d389ffa65a6" containerName="ceilometer-notification-agent" containerID="cri-o://a0b5c3e794b6a0afd8261726b3d10df95022d96335579198212a7bd02551fb56" gracePeriod=30
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.556196 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.557525 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.610228 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 13:42:36 crc kubenswrapper[4857]: W1128 13:42:36.612780 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb57e1273_601c_4338_90bd_9047ae46b65a.slice/crio-54b0763f47902ee147e8b9752b435c3575ae599eb60befbd8a952c495c1effaf WatchSource:0}: Error finding container 54b0763f47902ee147e8b9752b435c3575ae599eb60befbd8a952c495c1effaf: Status 404 returned error can't find the container with id 54b0763f47902ee147e8b9752b435c3575ae599eb60befbd8a952c495c1effaf
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.674494 4857 generic.go:334] "Generic (PLEG): container finished" podID="6e97fad4-9072-4c1e-be9a-7d389ffa65a6" containerID="643f34bc4429cf21c2ddd5f217c5547e99769b91406851c62581dbab5a7adacd" exitCode=2
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.674553 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6e97fad4-9072-4c1e-be9a-7d389ffa65a6","Type":"ContainerDied","Data":"643f34bc4429cf21c2ddd5f217c5547e99769b91406851c62581dbab5a7adacd"}
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.676033 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b57e1273-601c-4338-90bd-9047ae46b65a","Type":"ContainerStarted","Data":"54b0763f47902ee147e8b9752b435c3575ae599eb60befbd8a952c495c1effaf"}
Nov 28 13:42:36 crc kubenswrapper[4857]: I1128 13:42:36.677979 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1fa6d725-8054-46f1-8c0c-c693d5306563","Type":"ContainerStarted","Data":"cbd2b9e7b7d5ac247cdedd4649bdad8529cc7aa4ebacb10e1349b2c0e85bf7b0"}
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.092696 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-rhznt"
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.260296 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-config-data\") pod \"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7\" (UID: \"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7\") "
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.260423 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-combined-ca-bundle\") pod \"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7\" (UID: \"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7\") "
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.260504 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-scripts\") pod \"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7\" (UID: \"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7\") "
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.260638 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k2x87\" (UniqueName: \"kubernetes.io/projected/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-kube-api-access-k2x87\") pod \"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7\" (UID: \"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7\") "
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.268936 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-kube-api-access-k2x87" (OuterVolumeSpecName: "kube-api-access-k2x87") pod "7b35e465-15a4-4f5a-a53a-2fb23b2edeb7" (UID: "7b35e465-15a4-4f5a-a53a-2fb23b2edeb7"). InnerVolumeSpecName "kube-api-access-k2x87". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.274054 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-scripts" (OuterVolumeSpecName: "scripts") pod "7b35e465-15a4-4f5a-a53a-2fb23b2edeb7" (UID: "7b35e465-15a4-4f5a-a53a-2fb23b2edeb7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.294775 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7b35e465-15a4-4f5a-a53a-2fb23b2edeb7" (UID: "7b35e465-15a4-4f5a-a53a-2fb23b2edeb7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.335959 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-config-data" (OuterVolumeSpecName: "config-data") pod "7b35e465-15a4-4f5a-a53a-2fb23b2edeb7" (UID: "7b35e465-15a4-4f5a-a53a-2fb23b2edeb7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.362616 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k2x87\" (UniqueName: \"kubernetes.io/projected/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-kube-api-access-k2x87\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.362646 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.362655 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.362664 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.713736 4857 generic.go:334] "Generic (PLEG): container finished" podID="6e97fad4-9072-4c1e-be9a-7d389ffa65a6" containerID="8ec0ce83b0516015af9602e6302492334cf6e42a39f1188e6b5f8521cd6ccac5" exitCode=0
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.713796 4857 generic.go:334] "Generic (PLEG): container finished" podID="6e97fad4-9072-4c1e-be9a-7d389ffa65a6" containerID="d4d56c8b40253bdff3b78dfce7375baff03acd16e1eb3619dcaaa0189a673b80" exitCode=0
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.713895 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6e97fad4-9072-4c1e-be9a-7d389ffa65a6","Type":"ContainerDied","Data":"8ec0ce83b0516015af9602e6302492334cf6e42a39f1188e6b5f8521cd6ccac5"}
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.713957 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6e97fad4-9072-4c1e-be9a-7d389ffa65a6","Type":"ContainerDied","Data":"d4d56c8b40253bdff3b78dfce7375baff03acd16e1eb3619dcaaa0189a673b80"}
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.716533 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b57e1273-601c-4338-90bd-9047ae46b65a","Type":"ContainerStarted","Data":"f40ddd142fa37048e48987bebb61612d533528aba55010e42f4d8ad33abba695"}
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.716855 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b57e1273-601c-4338-90bd-9047ae46b65a","Type":"ContainerStarted","Data":"849f1ffdfd0189980913d9c84417eeb4cc7cf8a96da7546b425510a43946f590"}
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.723103 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1fa6d725-8054-46f1-8c0c-c693d5306563","Type":"ContainerStarted","Data":"5293dd02b3d8cbb50029798677d596c61dae0e02fb1b0ef17359254ce5d584b6"}
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.724019 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.730567 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-rhznt" event={"ID":"7b35e465-15a4-4f5a-a53a-2fb23b2edeb7","Type":"ContainerDied","Data":"78c54f1464a9165f30c39959007fb6e38bb0007d895ca5432935316832aac98a"}
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.730612 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="78c54f1464a9165f30c39959007fb6e38bb0007d895ca5432935316832aac98a"
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.730691 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-rhznt"
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.737315 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 28 13:42:37 crc kubenswrapper[4857]: E1128 13:42:37.737919 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b35e465-15a4-4f5a-a53a-2fb23b2edeb7" containerName="nova-cell1-conductor-db-sync"
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.737963 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b35e465-15a4-4f5a-a53a-2fb23b2edeb7" containerName="nova-cell1-conductor-db-sync"
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.738286 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b35e465-15a4-4f5a-a53a-2fb23b2edeb7" containerName="nova-cell1-conductor-db-sync"
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.739537 4857 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.757584 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.772569 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.779477 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7bf9e28-fd40-4b0d-aac9-995eff12a115-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"f7bf9e28-fd40-4b0d-aac9-995eff12a115\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.779568 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9kp9n\" (UniqueName: \"kubernetes.io/projected/f7bf9e28-fd40-4b0d-aac9-995eff12a115-kube-api-access-9kp9n\") pod \"nova-cell1-conductor-0\" (UID: \"f7bf9e28-fd40-4b0d-aac9-995eff12a115\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.779871 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7bf9e28-fd40-4b0d-aac9-995eff12a115-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"f7bf9e28-fd40-4b0d-aac9-995eff12a115\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.804288 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.8042610249999997 podStartE2EDuration="2.804261025s" podCreationTimestamp="2025-11-28 13:42:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:42:37.743111786 +0000 UTC m=+1449.770486973" watchObservedRunningTime="2025-11-28 13:42:37.804261025 +0000 UTC m=+1449.831636182"
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.817831 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.450729797 podStartE2EDuration="2.817812465s" podCreationTimestamp="2025-11-28 13:42:35 +0000 UTC" firstStartedPulling="2025-11-28 13:42:36.557313016 +0000 UTC m=+1448.584688183" lastFinishedPulling="2025-11-28 13:42:36.924395694 +0000 UTC m=+1448.951770851" observedRunningTime="2025-11-28 13:42:37.798279223 +0000 UTC m=+1449.825654390" watchObservedRunningTime="2025-11-28 13:42:37.817812465 +0000 UTC m=+1449.845187632"
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.884096 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7bf9e28-fd40-4b0d-aac9-995eff12a115-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"f7bf9e28-fd40-4b0d-aac9-995eff12a115\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.884212 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7bf9e28-fd40-4b0d-aac9-995eff12a115-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"f7bf9e28-fd40-4b0d-aac9-995eff12a115\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.884267 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9kp9n\" (UniqueName: \"kubernetes.io/projected/f7bf9e28-fd40-4b0d-aac9-995eff12a115-kube-api-access-9kp9n\") pod \"nova-cell1-conductor-0\" (UID: \"f7bf9e28-fd40-4b0d-aac9-995eff12a115\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.888069 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7bf9e28-fd40-4b0d-aac9-995eff12a115-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"f7bf9e28-fd40-4b0d-aac9-995eff12a115\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.894704 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7bf9e28-fd40-4b0d-aac9-995eff12a115-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"f7bf9e28-fd40-4b0d-aac9-995eff12a115\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 13:42:37 crc kubenswrapper[4857]: E1128 13:42:37.903874 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5efe0f809c40cd40a2ee8b6470ce7ff4ad7de3631f52cff7dd89555f97e66d87" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 28 13:42:37 crc kubenswrapper[4857]: I1128 13:42:37.904263 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9kp9n\" (UniqueName: \"kubernetes.io/projected/f7bf9e28-fd40-4b0d-aac9-995eff12a115-kube-api-access-9kp9n\") pod \"nova-cell1-conductor-0\" (UID: \"f7bf9e28-fd40-4b0d-aac9-995eff12a115\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 13:42:37 crc kubenswrapper[4857]: E1128 13:42:37.911408 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5efe0f809c40cd40a2ee8b6470ce7ff4ad7de3631f52cff7dd89555f97e66d87" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 28 13:42:37 crc kubenswrapper[4857]: E1128 13:42:37.928028 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5efe0f809c40cd40a2ee8b6470ce7ff4ad7de3631f52cff7dd89555f97e66d87" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 28 13:42:37 crc kubenswrapper[4857]: E1128 13:42:37.928107 4857 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="5d25d4c7-4988-4baf-8b11-8b0c9be42e23" containerName="nova-scheduler-scheduler"
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.081456 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.213491 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.392711 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-772x6\" (UniqueName: \"kubernetes.io/projected/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-kube-api-access-772x6\") pod \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") "
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.392775 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-combined-ca-bundle\") pod \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") "
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.392901 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-config-data\") pod \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") "
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.392930 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-log-httpd\") pod \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") "
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.392986 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-run-httpd\") pod \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") "
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.393023 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-sg-core-conf-yaml\") pod \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") "
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.393060 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-scripts\") pod \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\" (UID: \"6e97fad4-9072-4c1e-be9a-7d389ffa65a6\") "
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.399061 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-scripts" (OuterVolumeSpecName: "scripts") pod "6e97fad4-9072-4c1e-be9a-7d389ffa65a6" (UID: "6e97fad4-9072-4c1e-be9a-7d389ffa65a6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.399391 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "6e97fad4-9072-4c1e-be9a-7d389ffa65a6" (UID: "6e97fad4-9072-4c1e-be9a-7d389ffa65a6"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.399611 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "6e97fad4-9072-4c1e-be9a-7d389ffa65a6" (UID: "6e97fad4-9072-4c1e-be9a-7d389ffa65a6"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.415222 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-kube-api-access-772x6" (OuterVolumeSpecName: "kube-api-access-772x6") pod "6e97fad4-9072-4c1e-be9a-7d389ffa65a6" (UID: "6e97fad4-9072-4c1e-be9a-7d389ffa65a6"). InnerVolumeSpecName "kube-api-access-772x6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.444867 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "6e97fad4-9072-4c1e-be9a-7d389ffa65a6" (UID: "6e97fad4-9072-4c1e-be9a-7d389ffa65a6"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.495555 4857 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.495583 4857 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.495595 4857 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.495605 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.495617 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-772x6\" (UniqueName: \"kubernetes.io/projected/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-kube-api-access-772x6\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.506412 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6e97fad4-9072-4c1e-be9a-7d389ffa65a6" (UID: "6e97fad4-9072-4c1e-be9a-7d389ffa65a6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.521780 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-config-data" (OuterVolumeSpecName: "config-data") pod "6e97fad4-9072-4c1e-be9a-7d389ffa65a6" (UID: "6e97fad4-9072-4c1e-be9a-7d389ffa65a6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.539525 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.598149 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.598813 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e97fad4-9072-4c1e-be9a-7d389ffa65a6-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.741451 4857 generic.go:334] "Generic (PLEG): container finished" podID="6e97fad4-9072-4c1e-be9a-7d389ffa65a6" containerID="a0b5c3e794b6a0afd8261726b3d10df95022d96335579198212a7bd02551fb56" exitCode=0
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.741532 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.741535 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6e97fad4-9072-4c1e-be9a-7d389ffa65a6","Type":"ContainerDied","Data":"a0b5c3e794b6a0afd8261726b3d10df95022d96335579198212a7bd02551fb56"}
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.741595 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6e97fad4-9072-4c1e-be9a-7d389ffa65a6","Type":"ContainerDied","Data":"651d49a82571a51dde022f119b5b01a28457c8a4cd17ea420c77253c48d376aa"}
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.741630 4857 scope.go:117] "RemoveContainer" containerID="8ec0ce83b0516015af9602e6302492334cf6e42a39f1188e6b5f8521cd6ccac5"
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.744164 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"f7bf9e28-fd40-4b0d-aac9-995eff12a115","Type":"ContainerStarted","Data":"af2a21437b5950c07391db6d069bd153d9b422fd5daa52cd346a87417d643f35"}
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.744209 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"f7bf9e28-fd40-4b0d-aac9-995eff12a115","Type":"ContainerStarted","Data":"426fecab0952c028776039daf03243cae7cd1f7183e4fcccea5133ee27cd2596"}
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.763833 4857 scope.go:117] "RemoveContainer" containerID="643f34bc4429cf21c2ddd5f217c5547e99769b91406851c62581dbab5a7adacd"
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.771205 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=1.77118713 podStartE2EDuration="1.77118713s" podCreationTimestamp="2025-11-28 13:42:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:42:38.762268754 +0000 UTC m=+1450.789643921" watchObservedRunningTime="2025-11-28 13:42:38.77118713 +0000 UTC m=+1450.798562297"
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.787603 4857 scope.go:117] "RemoveContainer" containerID="a0b5c3e794b6a0afd8261726b3d10df95022d96335579198212a7bd02551fb56"
Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.787738 4857
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.805390 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.812990 4857 scope.go:117] "RemoveContainer" containerID="d4d56c8b40253bdff3b78dfce7375baff03acd16e1eb3619dcaaa0189a673b80" Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.817320 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:42:38 crc kubenswrapper[4857]: E1128 13:42:38.817796 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e97fad4-9072-4c1e-be9a-7d389ffa65a6" containerName="ceilometer-central-agent" Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.817812 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e97fad4-9072-4c1e-be9a-7d389ffa65a6" containerName="ceilometer-central-agent" Nov 28 13:42:38 crc kubenswrapper[4857]: E1128 13:42:38.817836 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e97fad4-9072-4c1e-be9a-7d389ffa65a6" containerName="proxy-httpd" Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.817845 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e97fad4-9072-4c1e-be9a-7d389ffa65a6" containerName="proxy-httpd" Nov 28 13:42:38 crc kubenswrapper[4857]: E1128 13:42:38.817861 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e97fad4-9072-4c1e-be9a-7d389ffa65a6" containerName="ceilometer-notification-agent" Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.817868 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e97fad4-9072-4c1e-be9a-7d389ffa65a6" containerName="ceilometer-notification-agent" Nov 28 13:42:38 crc kubenswrapper[4857]: E1128 13:42:38.817883 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e97fad4-9072-4c1e-be9a-7d389ffa65a6" containerName="sg-core" Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.817891 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e97fad4-9072-4c1e-be9a-7d389ffa65a6" containerName="sg-core" Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.818128 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e97fad4-9072-4c1e-be9a-7d389ffa65a6" containerName="proxy-httpd" Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.818160 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e97fad4-9072-4c1e-be9a-7d389ffa65a6" containerName="ceilometer-central-agent" Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.818199 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e97fad4-9072-4c1e-be9a-7d389ffa65a6" containerName="ceilometer-notification-agent" Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.818210 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e97fad4-9072-4c1e-be9a-7d389ffa65a6" containerName="sg-core" Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.820100 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.823263 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.826958 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.827126 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.827654 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.857774 4857 scope.go:117] "RemoveContainer" containerID="8ec0ce83b0516015af9602e6302492334cf6e42a39f1188e6b5f8521cd6ccac5" Nov 28 13:42:38 crc kubenswrapper[4857]: E1128 13:42:38.858280 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ec0ce83b0516015af9602e6302492334cf6e42a39f1188e6b5f8521cd6ccac5\": container with ID starting with 8ec0ce83b0516015af9602e6302492334cf6e42a39f1188e6b5f8521cd6ccac5 not found: ID does not exist" containerID="8ec0ce83b0516015af9602e6302492334cf6e42a39f1188e6b5f8521cd6ccac5" Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.858312 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ec0ce83b0516015af9602e6302492334cf6e42a39f1188e6b5f8521cd6ccac5"} err="failed to get container status \"8ec0ce83b0516015af9602e6302492334cf6e42a39f1188e6b5f8521cd6ccac5\": rpc error: code = NotFound desc = could not find container \"8ec0ce83b0516015af9602e6302492334cf6e42a39f1188e6b5f8521cd6ccac5\": container with ID starting with 8ec0ce83b0516015af9602e6302492334cf6e42a39f1188e6b5f8521cd6ccac5 not found: ID does not exist" Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.858332 4857 scope.go:117] "RemoveContainer" containerID="643f34bc4429cf21c2ddd5f217c5547e99769b91406851c62581dbab5a7adacd" Nov 28 13:42:38 crc kubenswrapper[4857]: E1128 13:42:38.858690 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"643f34bc4429cf21c2ddd5f217c5547e99769b91406851c62581dbab5a7adacd\": container with ID starting with 643f34bc4429cf21c2ddd5f217c5547e99769b91406851c62581dbab5a7adacd not found: ID does not exist" containerID="643f34bc4429cf21c2ddd5f217c5547e99769b91406851c62581dbab5a7adacd" Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.858713 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"643f34bc4429cf21c2ddd5f217c5547e99769b91406851c62581dbab5a7adacd"} err="failed to get container status \"643f34bc4429cf21c2ddd5f217c5547e99769b91406851c62581dbab5a7adacd\": rpc error: code = NotFound desc = could not find container \"643f34bc4429cf21c2ddd5f217c5547e99769b91406851c62581dbab5a7adacd\": container with ID starting with 643f34bc4429cf21c2ddd5f217c5547e99769b91406851c62581dbab5a7adacd not found: ID does not exist" Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.858727 4857 scope.go:117] "RemoveContainer" containerID="a0b5c3e794b6a0afd8261726b3d10df95022d96335579198212a7bd02551fb56" Nov 28 13:42:38 crc kubenswrapper[4857]: E1128 13:42:38.859137 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"a0b5c3e794b6a0afd8261726b3d10df95022d96335579198212a7bd02551fb56\": container with ID starting with a0b5c3e794b6a0afd8261726b3d10df95022d96335579198212a7bd02551fb56 not found: ID does not exist" containerID="a0b5c3e794b6a0afd8261726b3d10df95022d96335579198212a7bd02551fb56" Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.859175 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0b5c3e794b6a0afd8261726b3d10df95022d96335579198212a7bd02551fb56"} err="failed to get container status \"a0b5c3e794b6a0afd8261726b3d10df95022d96335579198212a7bd02551fb56\": rpc error: code = NotFound desc = could not find container \"a0b5c3e794b6a0afd8261726b3d10df95022d96335579198212a7bd02551fb56\": container with ID starting with a0b5c3e794b6a0afd8261726b3d10df95022d96335579198212a7bd02551fb56 not found: ID does not exist" Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.859203 4857 scope.go:117] "RemoveContainer" containerID="d4d56c8b40253bdff3b78dfce7375baff03acd16e1eb3619dcaaa0189a673b80" Nov 28 13:42:38 crc kubenswrapper[4857]: E1128 13:42:38.859527 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4d56c8b40253bdff3b78dfce7375baff03acd16e1eb3619dcaaa0189a673b80\": container with ID starting with d4d56c8b40253bdff3b78dfce7375baff03acd16e1eb3619dcaaa0189a673b80 not found: ID does not exist" containerID="d4d56c8b40253bdff3b78dfce7375baff03acd16e1eb3619dcaaa0189a673b80" Nov 28 13:42:38 crc kubenswrapper[4857]: I1128 13:42:38.859545 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4d56c8b40253bdff3b78dfce7375baff03acd16e1eb3619dcaaa0189a673b80"} err="failed to get container status \"d4d56c8b40253bdff3b78dfce7375baff03acd16e1eb3619dcaaa0189a673b80\": rpc error: code = NotFound desc = could not find container \"d4d56c8b40253bdff3b78dfce7375baff03acd16e1eb3619dcaaa0189a673b80\": container with ID starting with d4d56c8b40253bdff3b78dfce7375baff03acd16e1eb3619dcaaa0189a673b80 not found: ID does not exist" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.007162 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.007255 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-config-data\") pod \"ceilometer-0\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.007335 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-log-httpd\") pod \"ceilometer-0\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.007364 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjlkv\" (UniqueName: \"kubernetes.io/projected/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-kube-api-access-rjlkv\") pod \"ceilometer-0\" (UID: 
\"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.007385 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.007432 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-scripts\") pod \"ceilometer-0\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.007472 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.007503 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-run-httpd\") pod \"ceilometer-0\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.109681 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-log-httpd\") pod \"ceilometer-0\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.109778 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjlkv\" (UniqueName: \"kubernetes.io/projected/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-kube-api-access-rjlkv\") pod \"ceilometer-0\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.109809 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.109872 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-scripts\") pod \"ceilometer-0\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.109919 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.109952 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-run-httpd\") pod \"ceilometer-0\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.110014 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.110087 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-config-data\") pod \"ceilometer-0\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.112473 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-run-httpd\") pod \"ceilometer-0\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.112600 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-log-httpd\") pod \"ceilometer-0\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.116839 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.118033 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.118601 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-scripts\") pod \"ceilometer-0\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.118680 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.124955 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-config-data\") pod \"ceilometer-0\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.131280 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjlkv\" (UniqueName: 
\"kubernetes.io/projected/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-kube-api-access-rjlkv\") pod \"ceilometer-0\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") " pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.139890 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.604595 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:42:39 crc kubenswrapper[4857]: W1128 13:42:39.609243 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8f8ac068_4b8e_4d9f_90af_eb4bcf19c443.slice/crio-75d2ef7067e3baac71e8adeb213f3c4452ab78a203920b6d5df7688a736af4cc WatchSource:0}: Error finding container 75d2ef7067e3baac71e8adeb213f3c4452ab78a203920b6d5df7688a736af4cc: Status 404 returned error can't find the container with id 75d2ef7067e3baac71e8adeb213f3c4452ab78a203920b6d5df7688a736af4cc Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.639334 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.754187 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443","Type":"ContainerStarted","Data":"75d2ef7067e3baac71e8adeb213f3c4452ab78a203920b6d5df7688a736af4cc"} Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.759492 4857 generic.go:334] "Generic (PLEG): container finished" podID="62339922-009a-4a67-93bc-095aa84f945e" containerID="97b041f6bed24fcb2db50548f37540c8d4d9885f3ee5e1847404436e539c880d" exitCode=0 Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.759553 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.759604 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"62339922-009a-4a67-93bc-095aa84f945e","Type":"ContainerDied","Data":"97b041f6bed24fcb2db50548f37540c8d4d9885f3ee5e1847404436e539c880d"} Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.759656 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"62339922-009a-4a67-93bc-095aa84f945e","Type":"ContainerDied","Data":"112130084e56af62e372d7a4969902439f4c5555d7fc52fbfe4be6f0dcf56a5f"} Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.759681 4857 scope.go:117] "RemoveContainer" containerID="97b041f6bed24fcb2db50548f37540c8d4d9885f3ee5e1847404436e539c880d" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.760505 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.783903 4857 scope.go:117] "RemoveContainer" containerID="46d8c5620e55bae59dcdadf7ccf2b686ec5ae9dae41652130b871690631948fe" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.800153 4857 scope.go:117] "RemoveContainer" containerID="97b041f6bed24fcb2db50548f37540c8d4d9885f3ee5e1847404436e539c880d" Nov 28 13:42:39 crc kubenswrapper[4857]: E1128 13:42:39.800613 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97b041f6bed24fcb2db50548f37540c8d4d9885f3ee5e1847404436e539c880d\": container with ID starting with 97b041f6bed24fcb2db50548f37540c8d4d9885f3ee5e1847404436e539c880d not found: ID does not exist" containerID="97b041f6bed24fcb2db50548f37540c8d4d9885f3ee5e1847404436e539c880d" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.800674 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97b041f6bed24fcb2db50548f37540c8d4d9885f3ee5e1847404436e539c880d"} err="failed to get container status \"97b041f6bed24fcb2db50548f37540c8d4d9885f3ee5e1847404436e539c880d\": rpc error: code = NotFound desc = could not find container \"97b041f6bed24fcb2db50548f37540c8d4d9885f3ee5e1847404436e539c880d\": container with ID starting with 97b041f6bed24fcb2db50548f37540c8d4d9885f3ee5e1847404436e539c880d not found: ID does not exist" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.800704 4857 scope.go:117] "RemoveContainer" containerID="46d8c5620e55bae59dcdadf7ccf2b686ec5ae9dae41652130b871690631948fe" Nov 28 13:42:39 crc kubenswrapper[4857]: E1128 13:42:39.801019 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46d8c5620e55bae59dcdadf7ccf2b686ec5ae9dae41652130b871690631948fe\": container with ID starting with 46d8c5620e55bae59dcdadf7ccf2b686ec5ae9dae41652130b871690631948fe not found: ID does not exist" containerID="46d8c5620e55bae59dcdadf7ccf2b686ec5ae9dae41652130b871690631948fe" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.801051 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46d8c5620e55bae59dcdadf7ccf2b686ec5ae9dae41652130b871690631948fe"} err="failed to get container status \"46d8c5620e55bae59dcdadf7ccf2b686ec5ae9dae41652130b871690631948fe\": rpc error: code = NotFound desc = could not find container \"46d8c5620e55bae59dcdadf7ccf2b686ec5ae9dae41652130b871690631948fe\": container with ID starting with 
46d8c5620e55bae59dcdadf7ccf2b686ec5ae9dae41652130b871690631948fe not found: ID does not exist" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.825281 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4cszx\" (UniqueName: \"kubernetes.io/projected/62339922-009a-4a67-93bc-095aa84f945e-kube-api-access-4cszx\") pod \"62339922-009a-4a67-93bc-095aa84f945e\" (UID: \"62339922-009a-4a67-93bc-095aa84f945e\") " Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.825322 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62339922-009a-4a67-93bc-095aa84f945e-combined-ca-bundle\") pod \"62339922-009a-4a67-93bc-095aa84f945e\" (UID: \"62339922-009a-4a67-93bc-095aa84f945e\") " Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.825367 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62339922-009a-4a67-93bc-095aa84f945e-config-data\") pod \"62339922-009a-4a67-93bc-095aa84f945e\" (UID: \"62339922-009a-4a67-93bc-095aa84f945e\") " Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.825533 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/62339922-009a-4a67-93bc-095aa84f945e-logs\") pod \"62339922-009a-4a67-93bc-095aa84f945e\" (UID: \"62339922-009a-4a67-93bc-095aa84f945e\") " Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.826328 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62339922-009a-4a67-93bc-095aa84f945e-logs" (OuterVolumeSpecName: "logs") pod "62339922-009a-4a67-93bc-095aa84f945e" (UID: "62339922-009a-4a67-93bc-095aa84f945e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.837470 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62339922-009a-4a67-93bc-095aa84f945e-kube-api-access-4cszx" (OuterVolumeSpecName: "kube-api-access-4cszx") pod "62339922-009a-4a67-93bc-095aa84f945e" (UID: "62339922-009a-4a67-93bc-095aa84f945e"). InnerVolumeSpecName "kube-api-access-4cszx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.870850 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62339922-009a-4a67-93bc-095aa84f945e-config-data" (OuterVolumeSpecName: "config-data") pod "62339922-009a-4a67-93bc-095aa84f945e" (UID: "62339922-009a-4a67-93bc-095aa84f945e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.911866 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62339922-009a-4a67-93bc-095aa84f945e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "62339922-009a-4a67-93bc-095aa84f945e" (UID: "62339922-009a-4a67-93bc-095aa84f945e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.927459 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/62339922-009a-4a67-93bc-095aa84f945e-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.927487 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4cszx\" (UniqueName: \"kubernetes.io/projected/62339922-009a-4a67-93bc-095aa84f945e-kube-api-access-4cszx\") on node \"crc\" DevicePath \"\"" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.927498 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62339922-009a-4a67-93bc-095aa84f945e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:42:39 crc kubenswrapper[4857]: I1128 13:42:39.927508 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62339922-009a-4a67-93bc-095aa84f945e-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.208216 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.229033 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.240255 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.275900 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 13:42:40 crc kubenswrapper[4857]: E1128 13:42:40.276369 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62339922-009a-4a67-93bc-095aa84f945e" containerName="nova-api-api" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.276392 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="62339922-009a-4a67-93bc-095aa84f945e" containerName="nova-api-api" Nov 28 13:42:40 crc kubenswrapper[4857]: E1128 13:42:40.276414 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d25d4c7-4988-4baf-8b11-8b0c9be42e23" containerName="nova-scheduler-scheduler" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.276423 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d25d4c7-4988-4baf-8b11-8b0c9be42e23" containerName="nova-scheduler-scheduler" Nov 28 13:42:40 crc kubenswrapper[4857]: E1128 13:42:40.276460 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62339922-009a-4a67-93bc-095aa84f945e" containerName="nova-api-log" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.276470 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="62339922-009a-4a67-93bc-095aa84f945e" containerName="nova-api-log" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.276668 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="62339922-009a-4a67-93bc-095aa84f945e" containerName="nova-api-api" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.276695 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="62339922-009a-4a67-93bc-095aa84f945e" containerName="nova-api-log" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.276706 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d25d4c7-4988-4baf-8b11-8b0c9be42e23" containerName="nova-scheduler-scheduler" Nov 28 13:42:40 
crc kubenswrapper[4857]: I1128 13:42:40.277911 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.285050 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.295694 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.329675 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62339922-009a-4a67-93bc-095aa84f945e" path="/var/lib/kubelet/pods/62339922-009a-4a67-93bc-095aa84f945e/volumes" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.330741 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e97fad4-9072-4c1e-be9a-7d389ffa65a6" path="/var/lib/kubelet/pods/6e97fad4-9072-4c1e-be9a-7d389ffa65a6/volumes" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.334184 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-drltg\" (UniqueName: \"kubernetes.io/projected/5d25d4c7-4988-4baf-8b11-8b0c9be42e23-kube-api-access-drltg\") pod \"5d25d4c7-4988-4baf-8b11-8b0c9be42e23\" (UID: \"5d25d4c7-4988-4baf-8b11-8b0c9be42e23\") " Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.334441 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d25d4c7-4988-4baf-8b11-8b0c9be42e23-config-data\") pod \"5d25d4c7-4988-4baf-8b11-8b0c9be42e23\" (UID: \"5d25d4c7-4988-4baf-8b11-8b0c9be42e23\") " Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.334509 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d25d4c7-4988-4baf-8b11-8b0c9be42e23-combined-ca-bundle\") pod \"5d25d4c7-4988-4baf-8b11-8b0c9be42e23\" (UID: \"5d25d4c7-4988-4baf-8b11-8b0c9be42e23\") " Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.342401 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d25d4c7-4988-4baf-8b11-8b0c9be42e23-kube-api-access-drltg" (OuterVolumeSpecName: "kube-api-access-drltg") pod "5d25d4c7-4988-4baf-8b11-8b0c9be42e23" (UID: "5d25d4c7-4988-4baf-8b11-8b0c9be42e23"). InnerVolumeSpecName "kube-api-access-drltg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.393986 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d25d4c7-4988-4baf-8b11-8b0c9be42e23-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5d25d4c7-4988-4baf-8b11-8b0c9be42e23" (UID: "5d25d4c7-4988-4baf-8b11-8b0c9be42e23"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.394864 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d25d4c7-4988-4baf-8b11-8b0c9be42e23-config-data" (OuterVolumeSpecName: "config-data") pod "5d25d4c7-4988-4baf-8b11-8b0c9be42e23" (UID: "5d25d4c7-4988-4baf-8b11-8b0c9be42e23"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.439793 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9j9kt\" (UniqueName: \"kubernetes.io/projected/de6e2aaf-df58-4db9-86e8-6491e5bb332e-kube-api-access-9j9kt\") pod \"nova-api-0\" (UID: \"de6e2aaf-df58-4db9-86e8-6491e5bb332e\") " pod="openstack/nova-api-0" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.439914 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de6e2aaf-df58-4db9-86e8-6491e5bb332e-logs\") pod \"nova-api-0\" (UID: \"de6e2aaf-df58-4db9-86e8-6491e5bb332e\") " pod="openstack/nova-api-0" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.439957 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de6e2aaf-df58-4db9-86e8-6491e5bb332e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"de6e2aaf-df58-4db9-86e8-6491e5bb332e\") " pod="openstack/nova-api-0" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.440014 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de6e2aaf-df58-4db9-86e8-6491e5bb332e-config-data\") pod \"nova-api-0\" (UID: \"de6e2aaf-df58-4db9-86e8-6491e5bb332e\") " pod="openstack/nova-api-0" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.440104 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d25d4c7-4988-4baf-8b11-8b0c9be42e23-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.440115 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-drltg\" (UniqueName: \"kubernetes.io/projected/5d25d4c7-4988-4baf-8b11-8b0c9be42e23-kube-api-access-drltg\") on node \"crc\" DevicePath \"\"" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.440126 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d25d4c7-4988-4baf-8b11-8b0c9be42e23-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.541374 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de6e2aaf-df58-4db9-86e8-6491e5bb332e-config-data\") pod \"nova-api-0\" (UID: \"de6e2aaf-df58-4db9-86e8-6491e5bb332e\") " pod="openstack/nova-api-0" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.541469 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9j9kt\" (UniqueName: \"kubernetes.io/projected/de6e2aaf-df58-4db9-86e8-6491e5bb332e-kube-api-access-9j9kt\") pod \"nova-api-0\" (UID: \"de6e2aaf-df58-4db9-86e8-6491e5bb332e\") " pod="openstack/nova-api-0" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.541504 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de6e2aaf-df58-4db9-86e8-6491e5bb332e-logs\") pod \"nova-api-0\" (UID: \"de6e2aaf-df58-4db9-86e8-6491e5bb332e\") " pod="openstack/nova-api-0" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.541538 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de6e2aaf-df58-4db9-86e8-6491e5bb332e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"de6e2aaf-df58-4db9-86e8-6491e5bb332e\") " pod="openstack/nova-api-0" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.542628 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de6e2aaf-df58-4db9-86e8-6491e5bb332e-logs\") pod \"nova-api-0\" (UID: \"de6e2aaf-df58-4db9-86e8-6491e5bb332e\") " pod="openstack/nova-api-0" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.546670 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de6e2aaf-df58-4db9-86e8-6491e5bb332e-config-data\") pod \"nova-api-0\" (UID: \"de6e2aaf-df58-4db9-86e8-6491e5bb332e\") " pod="openstack/nova-api-0" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.546739 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de6e2aaf-df58-4db9-86e8-6491e5bb332e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"de6e2aaf-df58-4db9-86e8-6491e5bb332e\") " pod="openstack/nova-api-0" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.558822 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9j9kt\" (UniqueName: \"kubernetes.io/projected/de6e2aaf-df58-4db9-86e8-6491e5bb332e-kube-api-access-9j9kt\") pod \"nova-api-0\" (UID: \"de6e2aaf-df58-4db9-86e8-6491e5bb332e\") " pod="openstack/nova-api-0" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.621797 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.773299 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443","Type":"ContainerStarted","Data":"28c3d4cb3ecd46fc90613fe6166df37aa4f90d2300053b277246858435c0381d"} Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.775551 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d25d4c7-4988-4baf-8b11-8b0c9be42e23" containerID="5efe0f809c40cd40a2ee8b6470ce7ff4ad7de3631f52cff7dd89555f97e66d87" exitCode=0 Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.775610 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"5d25d4c7-4988-4baf-8b11-8b0c9be42e23","Type":"ContainerDied","Data":"5efe0f809c40cd40a2ee8b6470ce7ff4ad7de3631f52cff7dd89555f97e66d87"} Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.775637 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"5d25d4c7-4988-4baf-8b11-8b0c9be42e23","Type":"ContainerDied","Data":"61da513c7c915ce89659370805c3344c94a6d46e98b850a1dc455b8855b68b00"} Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.775653 4857 scope.go:117] "RemoveContainer" containerID="5efe0f809c40cd40a2ee8b6470ce7ff4ad7de3631f52cff7dd89555f97e66d87" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.775770 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.824110 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.837930 4857 scope.go:117] "RemoveContainer" containerID="5efe0f809c40cd40a2ee8b6470ce7ff4ad7de3631f52cff7dd89555f97e66d87" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.838033 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:42:40 crc kubenswrapper[4857]: E1128 13:42:40.838251 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5efe0f809c40cd40a2ee8b6470ce7ff4ad7de3631f52cff7dd89555f97e66d87\": container with ID starting with 5efe0f809c40cd40a2ee8b6470ce7ff4ad7de3631f52cff7dd89555f97e66d87 not found: ID does not exist" containerID="5efe0f809c40cd40a2ee8b6470ce7ff4ad7de3631f52cff7dd89555f97e66d87" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.838280 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5efe0f809c40cd40a2ee8b6470ce7ff4ad7de3631f52cff7dd89555f97e66d87"} err="failed to get container status \"5efe0f809c40cd40a2ee8b6470ce7ff4ad7de3631f52cff7dd89555f97e66d87\": rpc error: code = NotFound desc = could not find container \"5efe0f809c40cd40a2ee8b6470ce7ff4ad7de3631f52cff7dd89555f97e66d87\": container with ID starting with 5efe0f809c40cd40a2ee8b6470ce7ff4ad7de3631f52cff7dd89555f97e66d87 not found: ID does not exist" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.852154 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.853467 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.861187 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.861518 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.948783 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hkpc\" (UniqueName: \"kubernetes.io/projected/4643753a-aa92-4d3e-a95e-5124d7792edb-kube-api-access-8hkpc\") pod \"nova-scheduler-0\" (UID: \"4643753a-aa92-4d3e-a95e-5124d7792edb\") " pod="openstack/nova-scheduler-0" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.948887 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4643753a-aa92-4d3e-a95e-5124d7792edb-config-data\") pod \"nova-scheduler-0\" (UID: \"4643753a-aa92-4d3e-a95e-5124d7792edb\") " pod="openstack/nova-scheduler-0" Nov 28 13:42:40 crc kubenswrapper[4857]: I1128 13:42:40.948933 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4643753a-aa92-4d3e-a95e-5124d7792edb-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"4643753a-aa92-4d3e-a95e-5124d7792edb\") " pod="openstack/nova-scheduler-0" Nov 28 13:42:41 crc kubenswrapper[4857]: I1128 13:42:41.051325 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4643753a-aa92-4d3e-a95e-5124d7792edb-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"4643753a-aa92-4d3e-a95e-5124d7792edb\") " pod="openstack/nova-scheduler-0" Nov 28 13:42:41 crc kubenswrapper[4857]: I1128 13:42:41.051482 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hkpc\" (UniqueName: \"kubernetes.io/projected/4643753a-aa92-4d3e-a95e-5124d7792edb-kube-api-access-8hkpc\") pod \"nova-scheduler-0\" (UID: \"4643753a-aa92-4d3e-a95e-5124d7792edb\") " pod="openstack/nova-scheduler-0" Nov 28 13:42:41 crc kubenswrapper[4857]: I1128 13:42:41.051593 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4643753a-aa92-4d3e-a95e-5124d7792edb-config-data\") pod \"nova-scheduler-0\" (UID: \"4643753a-aa92-4d3e-a95e-5124d7792edb\") " pod="openstack/nova-scheduler-0" Nov 28 13:42:41 crc kubenswrapper[4857]: I1128 13:42:41.056198 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4643753a-aa92-4d3e-a95e-5124d7792edb-config-data\") pod \"nova-scheduler-0\" (UID: \"4643753a-aa92-4d3e-a95e-5124d7792edb\") " pod="openstack/nova-scheduler-0" Nov 28 13:42:41 crc kubenswrapper[4857]: I1128 13:42:41.059618 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4643753a-aa92-4d3e-a95e-5124d7792edb-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"4643753a-aa92-4d3e-a95e-5124d7792edb\") " pod="openstack/nova-scheduler-0" Nov 28 13:42:41 crc kubenswrapper[4857]: I1128 13:42:41.081443 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hkpc\" (UniqueName: 
\"kubernetes.io/projected/4643753a-aa92-4d3e-a95e-5124d7792edb-kube-api-access-8hkpc\") pod \"nova-scheduler-0\" (UID: \"4643753a-aa92-4d3e-a95e-5124d7792edb\") " pod="openstack/nova-scheduler-0" Nov 28 13:42:41 crc kubenswrapper[4857]: I1128 13:42:41.092882 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 13:42:41 crc kubenswrapper[4857]: I1128 13:42:41.093202 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 13:42:41 crc kubenswrapper[4857]: I1128 13:42:41.108449 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:42:41 crc kubenswrapper[4857]: W1128 13:42:41.114050 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podde6e2aaf_df58_4db9_86e8_6491e5bb332e.slice/crio-56ee72b563a4eed87e0bcfc288988584b2ead19745136b033563446a2802ca77 WatchSource:0}: Error finding container 56ee72b563a4eed87e0bcfc288988584b2ead19745136b033563446a2802ca77: Status 404 returned error can't find the container with id 56ee72b563a4eed87e0bcfc288988584b2ead19745136b033563446a2802ca77 Nov 28 13:42:41 crc kubenswrapper[4857]: I1128 13:42:41.185252 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 13:42:41 crc kubenswrapper[4857]: W1128 13:42:41.698801 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4643753a_aa92_4d3e_a95e_5124d7792edb.slice/crio-93850b54570d72c4714bd09e58fab986675a7c4c3300366fb43b0c93cde9a1d0 WatchSource:0}: Error finding container 93850b54570d72c4714bd09e58fab986675a7c4c3300366fb43b0c93cde9a1d0: Status 404 returned error can't find the container with id 93850b54570d72c4714bd09e58fab986675a7c4c3300366fb43b0c93cde9a1d0 Nov 28 13:42:41 crc kubenswrapper[4857]: I1128 13:42:41.700700 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:42:41 crc kubenswrapper[4857]: I1128 13:42:41.806144 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4643753a-aa92-4d3e-a95e-5124d7792edb","Type":"ContainerStarted","Data":"93850b54570d72c4714bd09e58fab986675a7c4c3300366fb43b0c93cde9a1d0"} Nov 28 13:42:41 crc kubenswrapper[4857]: I1128 13:42:41.810502 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443","Type":"ContainerStarted","Data":"de21ec0fca45734ca9b1e00a72ff9590914c3ff1fa1ca16fa7dec64d8b1c0cd8"} Nov 28 13:42:41 crc kubenswrapper[4857]: I1128 13:42:41.815490 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"de6e2aaf-df58-4db9-86e8-6491e5bb332e","Type":"ContainerStarted","Data":"f2230527c267ffccf105a9abb6f07e9e0fd2ec753efb7ea5ebcce291e954d89f"} Nov 28 13:42:41 crc kubenswrapper[4857]: I1128 13:42:41.815530 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"de6e2aaf-df58-4db9-86e8-6491e5bb332e","Type":"ContainerStarted","Data":"8e01a4489e7a626d8a2381f15f652da57c8463af2e23c47136eeae53f28cd831"} Nov 28 13:42:41 crc kubenswrapper[4857]: I1128 13:42:41.815543 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"de6e2aaf-df58-4db9-86e8-6491e5bb332e","Type":"ContainerStarted","Data":"56ee72b563a4eed87e0bcfc288988584b2ead19745136b033563446a2802ca77"} Nov 28 13:42:41 crc kubenswrapper[4857]: I1128 13:42:41.841140 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=1.841123931 podStartE2EDuration="1.841123931s" podCreationTimestamp="2025-11-28 13:42:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:42:41.836854138 +0000 UTC m=+1453.864229305" watchObservedRunningTime="2025-11-28 13:42:41.841123931 +0000 UTC m=+1453.868499098" Nov 28 13:42:42 crc kubenswrapper[4857]: I1128 13:42:42.322177 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d25d4c7-4988-4baf-8b11-8b0c9be42e23" path="/var/lib/kubelet/pods/5d25d4c7-4988-4baf-8b11-8b0c9be42e23/volumes" Nov 28 13:42:42 crc kubenswrapper[4857]: I1128 13:42:42.824777 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4643753a-aa92-4d3e-a95e-5124d7792edb","Type":"ContainerStarted","Data":"6cc050e6717812d5a9cb105606616c7aa8b61278288e1e4f8fb0f37c96bbe529"} Nov 28 13:42:42 crc kubenswrapper[4857]: I1128 13:42:42.827874 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443","Type":"ContainerStarted","Data":"8ae04268da464f2ee7de89ef09584bf5d6d86e180022a6df58565902f0c83921"} Nov 28 13:42:42 crc kubenswrapper[4857]: I1128 13:42:42.844176 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.844159185 podStartE2EDuration="2.844159185s" podCreationTimestamp="2025-11-28 13:42:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:42:42.837170804 +0000 UTC m=+1454.864545971" watchObservedRunningTime="2025-11-28 13:42:42.844159185 +0000 UTC m=+1454.871534352" Nov 28 13:42:43 crc kubenswrapper[4857]: I1128 13:42:43.111455 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 28 13:42:43 crc kubenswrapper[4857]: I1128 13:42:43.854800 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443","Type":"ContainerStarted","Data":"0d1a68176621baedb73d3db60f74244e357a0d27fc4513ba172924f0ab7c1aa2"} Nov 28 13:42:43 crc kubenswrapper[4857]: I1128 13:42:43.855066 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 13:42:43 crc kubenswrapper[4857]: I1128 13:42:43.893515 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.943770032 podStartE2EDuration="5.893496811s" podCreationTimestamp="2025-11-28 13:42:38 +0000 UTC" firstStartedPulling="2025-11-28 13:42:39.611585836 +0000 UTC m=+1451.638961003" lastFinishedPulling="2025-11-28 13:42:43.561312595 +0000 UTC m=+1455.588687782" observedRunningTime="2025-11-28 13:42:43.88827696 +0000 UTC m=+1455.915652127" watchObservedRunningTime="2025-11-28 13:42:43.893496811 +0000 UTC m=+1455.920871978" Nov 28 13:42:46 crc kubenswrapper[4857]: I1128 13:42:46.079226 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 28 13:42:46 crc 
Nov 28 13:42:46 crc kubenswrapper[4857]: I1128 13:42:46.093636 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 28 13:42:46 crc kubenswrapper[4857]: I1128 13:42:46.186738 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 28 13:42:47 crc kubenswrapper[4857]: I1128 13:42:47.105948 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="b57e1273-601c-4338-90bd-9047ae46b65a" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.190:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 28 13:42:47 crc kubenswrapper[4857]: I1128 13:42:47.105948 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="b57e1273-601c-4338-90bd-9047ae46b65a" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.190:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 28 13:42:50 crc kubenswrapper[4857]: I1128 13:42:50.622572 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 28 13:42:50 crc kubenswrapper[4857]: I1128 13:42:50.622932 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 28 13:42:51 crc kubenswrapper[4857]: I1128 13:42:51.186702 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 28 13:42:51 crc kubenswrapper[4857]: I1128 13:42:51.233807 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 28 13:42:51 crc kubenswrapper[4857]: I1128 13:42:51.704944 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="de6e2aaf-df58-4db9-86e8-6491e5bb332e" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.193:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 28 13:42:51 crc kubenswrapper[4857]: I1128 13:42:51.704973 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="de6e2aaf-df58-4db9-86e8-6491e5bb332e" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.193:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 28 13:42:51 crc kubenswrapper[4857]: I1128 13:42:51.960496 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 28 13:42:56 crc kubenswrapper[4857]: I1128 13:42:56.102168 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 28 13:42:56 crc kubenswrapper[4857]: I1128 13:42:56.104041 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 28 13:42:56 crc kubenswrapper[4857]: I1128 13:42:56.113612 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 28 13:42:56 crc kubenswrapper[4857]: I1128 13:42:56.114105 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 28 13:42:58 crc kubenswrapper[4857]: I1128 13:42:58.877519 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:42:58 crc kubenswrapper[4857]: I1128 13:42:58.969957 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4a0c992-d1d1-496c-b6fe-f947484ba378-combined-ca-bundle\") pod \"b4a0c992-d1d1-496c-b6fe-f947484ba378\" (UID: \"b4a0c992-d1d1-496c-b6fe-f947484ba378\") "
Nov 28 13:42:58 crc kubenswrapper[4857]: I1128 13:42:58.970008 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4a0c992-d1d1-496c-b6fe-f947484ba378-config-data\") pod \"b4a0c992-d1d1-496c-b6fe-f947484ba378\" (UID: \"b4a0c992-d1d1-496c-b6fe-f947484ba378\") "
Nov 28 13:42:58 crc kubenswrapper[4857]: I1128 13:42:58.970065 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rkvbl\" (UniqueName: \"kubernetes.io/projected/b4a0c992-d1d1-496c-b6fe-f947484ba378-kube-api-access-rkvbl\") pod \"b4a0c992-d1d1-496c-b6fe-f947484ba378\" (UID: \"b4a0c992-d1d1-496c-b6fe-f947484ba378\") "
Nov 28 13:42:58 crc kubenswrapper[4857]: I1128 13:42:58.979937 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4a0c992-d1d1-496c-b6fe-f947484ba378-kube-api-access-rkvbl" (OuterVolumeSpecName: "kube-api-access-rkvbl") pod "b4a0c992-d1d1-496c-b6fe-f947484ba378" (UID: "b4a0c992-d1d1-496c-b6fe-f947484ba378"). InnerVolumeSpecName "kube-api-access-rkvbl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.007572 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4a0c992-d1d1-496c-b6fe-f947484ba378-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b4a0c992-d1d1-496c-b6fe-f947484ba378" (UID: "b4a0c992-d1d1-496c-b6fe-f947484ba378"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.015878 4857 generic.go:334] "Generic (PLEG): container finished" podID="b4a0c992-d1d1-496c-b6fe-f947484ba378" containerID="e091ae395c36daff41d1d0b042a64c8153752f9277a45b6dbbcb03b48c613724" exitCode=137
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.015961 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b4a0c992-d1d1-496c-b6fe-f947484ba378","Type":"ContainerDied","Data":"e091ae395c36daff41d1d0b042a64c8153752f9277a45b6dbbcb03b48c613724"}
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.015994 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b4a0c992-d1d1-496c-b6fe-f947484ba378","Type":"ContainerDied","Data":"443d8dafa74ebd81bace6f1faceff97cf53d31791849aebf67f6986ecb53529f"}
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.016017 4857 scope.go:117] "RemoveContainer" containerID="e091ae395c36daff41d1d0b042a64c8153752f9277a45b6dbbcb03b48c613724"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.016157 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.018937 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4a0c992-d1d1-496c-b6fe-f947484ba378-config-data" (OuterVolumeSpecName: "config-data") pod "b4a0c992-d1d1-496c-b6fe-f947484ba378" (UID: "b4a0c992-d1d1-496c-b6fe-f947484ba378"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.073369 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4a0c992-d1d1-496c-b6fe-f947484ba378-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.074225 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4a0c992-d1d1-496c-b6fe-f947484ba378-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.074246 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rkvbl\" (UniqueName: \"kubernetes.io/projected/b4a0c992-d1d1-496c-b6fe-f947484ba378-kube-api-access-rkvbl\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.079312 4857 scope.go:117] "RemoveContainer" containerID="e091ae395c36daff41d1d0b042a64c8153752f9277a45b6dbbcb03b48c613724"
Nov 28 13:42:59 crc kubenswrapper[4857]: E1128 13:42:59.080001 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e091ae395c36daff41d1d0b042a64c8153752f9277a45b6dbbcb03b48c613724\": container with ID starting with e091ae395c36daff41d1d0b042a64c8153752f9277a45b6dbbcb03b48c613724 not found: ID does not exist" containerID="e091ae395c36daff41d1d0b042a64c8153752f9277a45b6dbbcb03b48c613724"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.080066 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e091ae395c36daff41d1d0b042a64c8153752f9277a45b6dbbcb03b48c613724"} err="failed to get container status \"e091ae395c36daff41d1d0b042a64c8153752f9277a45b6dbbcb03b48c613724\": rpc error: code = NotFound desc = could not find container \"e091ae395c36daff41d1d0b042a64c8153752f9277a45b6dbbcb03b48c613724\": container with ID starting with e091ae395c36daff41d1d0b042a64c8153752f9277a45b6dbbcb03b48c613724 not found: ID does not exist"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.362137 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.372423 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.403579 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 13:42:59 crc kubenswrapper[4857]: E1128 13:42:59.404379 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4a0c992-d1d1-496c-b6fe-f947484ba378" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.404411 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4a0c992-d1d1-496c-b6fe-f947484ba378" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.404789 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4a0c992-d1d1-496c-b6fe-f947484ba378" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.405904 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.409870 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.410467 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.417599 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.419674 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.482662 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.482732 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.482836 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbpbt\" (UniqueName: \"kubernetes.io/projected/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-kube-api-access-nbpbt\") pod \"nova-cell1-novncproxy-0\" (UID: \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.483085 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.483136 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.584869 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.584939 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.584975 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nbpbt\" (UniqueName: \"kubernetes.io/projected/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-kube-api-access-nbpbt\") pod \"nova-cell1-novncproxy-0\" (UID: \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.585056 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.585080 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.590095 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.590210 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.590812 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.592699 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.600703 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbpbt\" (UniqueName: \"kubernetes.io/projected/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-kube-api-access-nbpbt\") pod \"nova-cell1-novncproxy-0\" (UID: \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:42:59 crc kubenswrapper[4857]: I1128 13:42:59.738708 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:43:00 crc kubenswrapper[4857]: I1128 13:43:00.182212 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 13:43:00 crc kubenswrapper[4857]: W1128 13:43:00.184397 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb6a593cc_74b3_4a02_ba7a_f4c5d7400476.slice/crio-cf59d27af77803d2eb7fcf1aca8c75a080389519bdbf5d6439918de1ac607955 WatchSource:0}: Error finding container cf59d27af77803d2eb7fcf1aca8c75a080389519bdbf5d6439918de1ac607955: Status 404 returned error can't find the container with id cf59d27af77803d2eb7fcf1aca8c75a080389519bdbf5d6439918de1ac607955
Nov 28 13:43:00 crc kubenswrapper[4857]: I1128 13:43:00.321724 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4a0c992-d1d1-496c-b6fe-f947484ba378" path="/var/lib/kubelet/pods/b4a0c992-d1d1-496c-b6fe-f947484ba378/volumes"
Nov 28 13:43:00 crc kubenswrapper[4857]: I1128 13:43:00.626802 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 28 13:43:00 crc kubenswrapper[4857]: I1128 13:43:00.627225 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 28 13:43:00 crc kubenswrapper[4857]: I1128 13:43:00.627438 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 28 13:43:00 crc kubenswrapper[4857]: I1128 13:43:00.630082 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.041246 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b6a593cc-74b3-4a02-ba7a-f4c5d7400476","Type":"ContainerStarted","Data":"a840981835cecbc52064fb805056a67f1d699c2bf561f689da71d182309a6ea3"}
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.041642 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b6a593cc-74b3-4a02-ba7a-f4c5d7400476","Type":"ContainerStarted","Data":"cf59d27af77803d2eb7fcf1aca8c75a080389519bdbf5d6439918de1ac607955"}
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.041668 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.048523 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.075886 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.075851246 podStartE2EDuration="2.075851246s" podCreationTimestamp="2025-11-28 13:42:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:43:01.066951459 +0000 UTC m=+1473.094326696" watchObservedRunningTime="2025-11-28 13:43:01.075851246 +0000 UTC m=+1473.103226453"
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.296514 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-h799k"]
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.303517 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k"
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.349776 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-h799k"]
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.427006 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82xwn\" (UniqueName: \"kubernetes.io/projected/0cb677df-7237-4b82-8806-d7abedfad40c-kube-api-access-82xwn\") pod \"dnsmasq-dns-cd5cbd7b9-h799k\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k"
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.427100 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-h799k\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k"
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.427158 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-h799k\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k"
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.427182 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-h799k\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k"
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.427229 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-config\") pod \"dnsmasq-dns-cd5cbd7b9-h799k\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k"
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.427275 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-h799k\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k"
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.528947 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82xwn\" (UniqueName: \"kubernetes.io/projected/0cb677df-7237-4b82-8806-d7abedfad40c-kube-api-access-82xwn\") pod \"dnsmasq-dns-cd5cbd7b9-h799k\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k"
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.529023 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-h799k\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k"
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.529062 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-h799k\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k"
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.529087 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-h799k\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k"
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.529115 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-config\") pod \"dnsmasq-dns-cd5cbd7b9-h799k\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k"
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.529154 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-h799k\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k"
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.529906 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-h799k\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k"
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.529959 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-h799k\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k"
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.530198 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-h799k\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k"
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.530452 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-config\") pod \"dnsmasq-dns-cd5cbd7b9-h799k\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k"
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.530487 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-h799k\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k"
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.549496 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82xwn\" (UniqueName: \"kubernetes.io/projected/0cb677df-7237-4b82-8806-d7abedfad40c-kube-api-access-82xwn\") pod \"dnsmasq-dns-cd5cbd7b9-h799k\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k"
Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.641142 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k"
Nov 28 13:43:02 crc kubenswrapper[4857]: I1128 13:43:02.166211 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-h799k"]
Nov 28 13:43:03 crc kubenswrapper[4857]: I1128 13:43:03.060189 4857 generic.go:334] "Generic (PLEG): container finished" podID="0cb677df-7237-4b82-8806-d7abedfad40c" containerID="54250c0eff6684e824788b7e12d3730da9501d1501683c7fd89bd82f1a19cc5d" exitCode=0
Nov 28 13:43:03 crc kubenswrapper[4857]: I1128 13:43:03.060293 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k" event={"ID":"0cb677df-7237-4b82-8806-d7abedfad40c","Type":"ContainerDied","Data":"54250c0eff6684e824788b7e12d3730da9501d1501683c7fd89bd82f1a19cc5d"}
Nov 28 13:43:03 crc kubenswrapper[4857]: I1128 13:43:03.060634 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k" event={"ID":"0cb677df-7237-4b82-8806-d7abedfad40c","Type":"ContainerStarted","Data":"a8612ed633c49d689316f547b420399bf07d8c73db8c5afb78385d362beaca55"}
Nov 28 13:43:03 crc kubenswrapper[4857]: I1128 13:43:03.356685 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 13:43:03 crc kubenswrapper[4857]: I1128 13:43:03.357398 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" containerName="ceilometer-central-agent" containerID="cri-o://28c3d4cb3ecd46fc90613fe6166df37aa4f90d2300053b277246858435c0381d" gracePeriod=30
Nov 28 13:43:03 crc kubenswrapper[4857]: I1128 13:43:03.357440 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" containerName="proxy-httpd" containerID="cri-o://0d1a68176621baedb73d3db60f74244e357a0d27fc4513ba172924f0ab7c1aa2" gracePeriod=30
Nov 28 13:43:03 crc kubenswrapper[4857]: I1128 13:43:03.357557 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" containerName="sg-core" containerID="cri-o://8ae04268da464f2ee7de89ef09584bf5d6d86e180022a6df58565902f0c83921" gracePeriod=30
Nov 28 13:43:03 crc kubenswrapper[4857]: I1128 13:43:03.357588 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" containerName="ceilometer-notification-agent" containerID="cri-o://de21ec0fca45734ca9b1e00a72ff9590914c3ff1fa1ca16fa7dec64d8b1c0cd8" gracePeriod=30
Nov 28 13:43:03 crc kubenswrapper[4857]: I1128 13:43:03.369590 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.192:3000/\": read tcp 10.217.0.2:46348->10.217.0.192:3000: read: connection reset by peer"
Nov 28 13:43:03 crc kubenswrapper[4857]: I1128 13:43:03.788694 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 28 13:43:04 crc kubenswrapper[4857]: I1128 13:43:04.073232 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k" event={"ID":"0cb677df-7237-4b82-8806-d7abedfad40c","Type":"ContainerStarted","Data":"54ebf8119e8b85e98e03f36c99c69b277451e9d96ff0dda0b092e460eb535292"}
Nov 28 13:43:04 crc kubenswrapper[4857]: I1128 13:43:04.074961 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k"
Nov 28 13:43:04 crc kubenswrapper[4857]: I1128 13:43:04.079252 4857 generic.go:334] "Generic (PLEG): container finished" podID="8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" containerID="0d1a68176621baedb73d3db60f74244e357a0d27fc4513ba172924f0ab7c1aa2" exitCode=0
Nov 28 13:43:04 crc kubenswrapper[4857]: I1128 13:43:04.079281 4857 generic.go:334] "Generic (PLEG): container finished" podID="8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" containerID="8ae04268da464f2ee7de89ef09584bf5d6d86e180022a6df58565902f0c83921" exitCode=2
Nov 28 13:43:04 crc kubenswrapper[4857]: I1128 13:43:04.079290 4857 generic.go:334] "Generic (PLEG): container finished" podID="8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" containerID="28c3d4cb3ecd46fc90613fe6166df37aa4f90d2300053b277246858435c0381d" exitCode=0
Nov 28 13:43:04 crc kubenswrapper[4857]: I1128 13:43:04.079446 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="de6e2aaf-df58-4db9-86e8-6491e5bb332e" containerName="nova-api-log" containerID="cri-o://8e01a4489e7a626d8a2381f15f652da57c8463af2e23c47136eeae53f28cd831" gracePeriod=30
Nov 28 13:43:04 crc kubenswrapper[4857]: I1128 13:43:04.080570 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443","Type":"ContainerDied","Data":"0d1a68176621baedb73d3db60f74244e357a0d27fc4513ba172924f0ab7c1aa2"}
Nov 28 13:43:04 crc kubenswrapper[4857]: I1128 13:43:04.080604 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443","Type":"ContainerDied","Data":"8ae04268da464f2ee7de89ef09584bf5d6d86e180022a6df58565902f0c83921"}
Nov 28 13:43:04 crc kubenswrapper[4857]: I1128 13:43:04.080614 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443","Type":"ContainerDied","Data":"28c3d4cb3ecd46fc90613fe6166df37aa4f90d2300053b277246858435c0381d"}
Nov 28 13:43:04 crc kubenswrapper[4857]: I1128 13:43:04.080661 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="de6e2aaf-df58-4db9-86e8-6491e5bb332e" containerName="nova-api-api" containerID="cri-o://f2230527c267ffccf105a9abb6f07e9e0fd2ec753efb7ea5ebcce291e954d89f" gracePeriod=30
Nov 28 13:43:04 crc kubenswrapper[4857]: I1128 13:43:04.103939 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k" podStartSLOduration=3.103924622 podStartE2EDuration="3.103924622s" podCreationTimestamp="2025-11-28 13:43:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:43:04.099798203 +0000 UTC m=+1476.127173380" watchObservedRunningTime="2025-11-28 13:43:04.103924622 +0000 UTC m=+1476.131299789"
Nov 28 13:43:04 crc kubenswrapper[4857]: I1128 13:43:04.739519 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:43:05 crc kubenswrapper[4857]: I1128 13:43:05.092412 4857 generic.go:334] "Generic (PLEG): container finished" podID="de6e2aaf-df58-4db9-86e8-6491e5bb332e" containerID="8e01a4489e7a626d8a2381f15f652da57c8463af2e23c47136eeae53f28cd831" exitCode=143
Nov 28 13:43:05 crc kubenswrapper[4857]: I1128 13:43:05.093494 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"de6e2aaf-df58-4db9-86e8-6491e5bb332e","Type":"ContainerDied","Data":"8e01a4489e7a626d8a2381f15f652da57c8463af2e23c47136eeae53f28cd831"}
Nov 28 13:43:07 crc kubenswrapper[4857]: I1128 13:43:07.723822 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 13:43:07 crc kubenswrapper[4857]: I1128 13:43:07.751144 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9j9kt\" (UniqueName: \"kubernetes.io/projected/de6e2aaf-df58-4db9-86e8-6491e5bb332e-kube-api-access-9j9kt\") pod \"de6e2aaf-df58-4db9-86e8-6491e5bb332e\" (UID: \"de6e2aaf-df58-4db9-86e8-6491e5bb332e\") "
Nov 28 13:43:07 crc kubenswrapper[4857]: I1128 13:43:07.751220 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de6e2aaf-df58-4db9-86e8-6491e5bb332e-config-data\") pod \"de6e2aaf-df58-4db9-86e8-6491e5bb332e\" (UID: \"de6e2aaf-df58-4db9-86e8-6491e5bb332e\") "
Nov 28 13:43:07 crc kubenswrapper[4857]: I1128 13:43:07.751247 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de6e2aaf-df58-4db9-86e8-6491e5bb332e-combined-ca-bundle\") pod \"de6e2aaf-df58-4db9-86e8-6491e5bb332e\" (UID: \"de6e2aaf-df58-4db9-86e8-6491e5bb332e\") "
Nov 28 13:43:07 crc kubenswrapper[4857]: I1128 13:43:07.751409 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de6e2aaf-df58-4db9-86e8-6491e5bb332e-logs\") pod \"de6e2aaf-df58-4db9-86e8-6491e5bb332e\" (UID: \"de6e2aaf-df58-4db9-86e8-6491e5bb332e\") "
Nov 28 13:43:07 crc kubenswrapper[4857]: I1128 13:43:07.752347 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de6e2aaf-df58-4db9-86e8-6491e5bb332e-logs" (OuterVolumeSpecName: "logs") pod "de6e2aaf-df58-4db9-86e8-6491e5bb332e" (UID: "de6e2aaf-df58-4db9-86e8-6491e5bb332e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:43:07 crc kubenswrapper[4857]: I1128 13:43:07.760956 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de6e2aaf-df58-4db9-86e8-6491e5bb332e-kube-api-access-9j9kt" (OuterVolumeSpecName: "kube-api-access-9j9kt") pod "de6e2aaf-df58-4db9-86e8-6491e5bb332e" (UID: "de6e2aaf-df58-4db9-86e8-6491e5bb332e"). InnerVolumeSpecName "kube-api-access-9j9kt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:43:07 crc kubenswrapper[4857]: I1128 13:43:07.792689 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de6e2aaf-df58-4db9-86e8-6491e5bb332e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "de6e2aaf-df58-4db9-86e8-6491e5bb332e" (UID: "de6e2aaf-df58-4db9-86e8-6491e5bb332e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:43:07 crc kubenswrapper[4857]: I1128 13:43:07.810940 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de6e2aaf-df58-4db9-86e8-6491e5bb332e-config-data" (OuterVolumeSpecName: "config-data") pod "de6e2aaf-df58-4db9-86e8-6491e5bb332e" (UID: "de6e2aaf-df58-4db9-86e8-6491e5bb332e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:43:07 crc kubenswrapper[4857]: I1128 13:43:07.854079 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9j9kt\" (UniqueName: \"kubernetes.io/projected/de6e2aaf-df58-4db9-86e8-6491e5bb332e-kube-api-access-9j9kt\") on node \"crc\" DevicePath \"\""
Nov 28 13:43:07 crc kubenswrapper[4857]: I1128 13:43:07.854144 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de6e2aaf-df58-4db9-86e8-6491e5bb332e-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 13:43:07 crc kubenswrapper[4857]: I1128 13:43:07.854158 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de6e2aaf-df58-4db9-86e8-6491e5bb332e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:43:07 crc kubenswrapper[4857]: I1128 13:43:07.854170 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de6e2aaf-df58-4db9-86e8-6491e5bb332e-logs\") on node \"crc\" DevicePath \"\""
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.132080 4857 generic.go:334] "Generic (PLEG): container finished" podID="8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" containerID="de21ec0fca45734ca9b1e00a72ff9590914c3ff1fa1ca16fa7dec64d8b1c0cd8" exitCode=0
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.132116 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443","Type":"ContainerDied","Data":"de21ec0fca45734ca9b1e00a72ff9590914c3ff1fa1ca16fa7dec64d8b1c0cd8"}
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.135694 4857 generic.go:334] "Generic (PLEG): container finished" podID="de6e2aaf-df58-4db9-86e8-6491e5bb332e" containerID="f2230527c267ffccf105a9abb6f07e9e0fd2ec753efb7ea5ebcce291e954d89f" exitCode=0
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.135740 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"de6e2aaf-df58-4db9-86e8-6491e5bb332e","Type":"ContainerDied","Data":"f2230527c267ffccf105a9abb6f07e9e0fd2ec753efb7ea5ebcce291e954d89f"}
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.135786 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"de6e2aaf-df58-4db9-86e8-6491e5bb332e","Type":"ContainerDied","Data":"56ee72b563a4eed87e0bcfc288988584b2ead19745136b033563446a2802ca77"}
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.135807 4857 scope.go:117] "RemoveContainer" containerID="f2230527c267ffccf105a9abb6f07e9e0fd2ec753efb7ea5ebcce291e954d89f"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.135945 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.198499 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.202876 4857 scope.go:117] "RemoveContainer" containerID="8e01a4489e7a626d8a2381f15f652da57c8463af2e23c47136eeae53f28cd831"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.203159 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.210541 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.220961 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 28 13:43:08 crc kubenswrapper[4857]: E1128 13:43:08.221368 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" containerName="ceilometer-central-agent"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.221388 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" containerName="ceilometer-central-agent"
Nov 28 13:43:08 crc kubenswrapper[4857]: E1128 13:43:08.221411 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" containerName="proxy-httpd"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.221421 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" containerName="proxy-httpd"
Nov 28 13:43:08 crc kubenswrapper[4857]: E1128 13:43:08.221441 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de6e2aaf-df58-4db9-86e8-6491e5bb332e" containerName="nova-api-api"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.221449 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="de6e2aaf-df58-4db9-86e8-6491e5bb332e" containerName="nova-api-api"
Nov 28 13:43:08 crc kubenswrapper[4857]: E1128 13:43:08.221463 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de6e2aaf-df58-4db9-86e8-6491e5bb332e" containerName="nova-api-log"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.221470 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="de6e2aaf-df58-4db9-86e8-6491e5bb332e" containerName="nova-api-log"
Nov 28 13:43:08 crc kubenswrapper[4857]: E1128 13:43:08.221484 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" containerName="sg-core"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.221491 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" containerName="sg-core"
Nov 28 13:43:08 crc kubenswrapper[4857]: E1128 13:43:08.221514 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" containerName="ceilometer-notification-agent"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.221522 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" containerName="ceilometer-notification-agent"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.221809 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" containerName="ceilometer-central-agent"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.221832 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" containerName="proxy-httpd"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.221849 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="de6e2aaf-df58-4db9-86e8-6491e5bb332e" containerName="nova-api-log"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.221861 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" containerName="sg-core"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.221881 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" containerName="ceilometer-notification-agent"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.221896 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="de6e2aaf-df58-4db9-86e8-6491e5bb332e" containerName="nova-api-api"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.222895 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.226033 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.226262 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.226380 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.256475 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.263480 4857 scope.go:117] "RemoveContainer" containerID="f2230527c267ffccf105a9abb6f07e9e0fd2ec753efb7ea5ebcce291e954d89f"
Nov 28 13:43:08 crc kubenswrapper[4857]: E1128 13:43:08.264229 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f2230527c267ffccf105a9abb6f07e9e0fd2ec753efb7ea5ebcce291e954d89f\": container with ID starting with f2230527c267ffccf105a9abb6f07e9e0fd2ec753efb7ea5ebcce291e954d89f not found: ID does not exist" containerID="f2230527c267ffccf105a9abb6f07e9e0fd2ec753efb7ea5ebcce291e954d89f"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.264301 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2230527c267ffccf105a9abb6f07e9e0fd2ec753efb7ea5ebcce291e954d89f"} err="failed to get container status \"f2230527c267ffccf105a9abb6f07e9e0fd2ec753efb7ea5ebcce291e954d89f\": rpc error: code = NotFound desc = could not find container \"f2230527c267ffccf105a9abb6f07e9e0fd2ec753efb7ea5ebcce291e954d89f\": container with ID starting with f2230527c267ffccf105a9abb6f07e9e0fd2ec753efb7ea5ebcce291e954d89f not found: ID does not exist"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.264329 4857 scope.go:117] "RemoveContainer" containerID="8e01a4489e7a626d8a2381f15f652da57c8463af2e23c47136eeae53f28cd831"
Nov 28 13:43:08 crc kubenswrapper[4857]: E1128 13:43:08.265003 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e01a4489e7a626d8a2381f15f652da57c8463af2e23c47136eeae53f28cd831\": container with ID starting with 8e01a4489e7a626d8a2381f15f652da57c8463af2e23c47136eeae53f28cd831 not found: ID does not exist" containerID="8e01a4489e7a626d8a2381f15f652da57c8463af2e23c47136eeae53f28cd831"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.265048 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e01a4489e7a626d8a2381f15f652da57c8463af2e23c47136eeae53f28cd831"} err="failed to get container status \"8e01a4489e7a626d8a2381f15f652da57c8463af2e23c47136eeae53f28cd831\": rpc error: code = NotFound desc = could not find container \"8e01a4489e7a626d8a2381f15f652da57c8463af2e23c47136eeae53f28cd831\": container with ID starting with 8e01a4489e7a626d8a2381f15f652da57c8463af2e23c47136eeae53f28cd831 not found: ID does not exist"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.274084 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-ceilometer-tls-certs\") pod \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") "
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.274129 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-sg-core-conf-yaml\") pod \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") "
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.274151 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-scripts\") pod \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") "
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.274216 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-config-data\") pod \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") "
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.274277 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-combined-ca-bundle\") pod \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") "
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.274308 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rjlkv\" (UniqueName: \"kubernetes.io/projected/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-kube-api-access-rjlkv\") pod \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") "
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.274436 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-log-httpd\") pod \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") "
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.274500 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-run-httpd\") pod \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\" (UID: \"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443\") "
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.274988 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2f49f\" (UniqueName: \"kubernetes.io/projected/b1265be9-7097-4d62-aa44-c64c7bf3df52-kube-api-access-2f49f\") pod \"nova-api-0\" (UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " pod="openstack/nova-api-0"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.275072 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " pod="openstack/nova-api-0"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.275112 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-internal-tls-certs\") pod \"nova-api-0\" (UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " pod="openstack/nova-api-0"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.275185 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1265be9-7097-4d62-aa44-c64c7bf3df52-logs\") pod \"nova-api-0\" (UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " pod="openstack/nova-api-0"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.275246 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-public-tls-certs\") pod \"nova-api-0\" (UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " pod="openstack/nova-api-0"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.275330 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-config-data\") pod \"nova-api-0\" (UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " pod="openstack/nova-api-0"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.277474 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" (UID: "8f8ac068-4b8e-4d9f-90af-eb4bcf19c443"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.277620 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" (UID: "8f8ac068-4b8e-4d9f-90af-eb4bcf19c443"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.280381 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-scripts" (OuterVolumeSpecName: "scripts") pod "8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" (UID: "8f8ac068-4b8e-4d9f-90af-eb4bcf19c443"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.283265 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-kube-api-access-rjlkv" (OuterVolumeSpecName: "kube-api-access-rjlkv") pod "8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" (UID: "8f8ac068-4b8e-4d9f-90af-eb4bcf19c443"). InnerVolumeSpecName "kube-api-access-rjlkv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.303844 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" (UID: "8f8ac068-4b8e-4d9f-90af-eb4bcf19c443"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.326687 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de6e2aaf-df58-4db9-86e8-6491e5bb332e" path="/var/lib/kubelet/pods/de6e2aaf-df58-4db9-86e8-6491e5bb332e/volumes"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.340031 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" (UID: "8f8ac068-4b8e-4d9f-90af-eb4bcf19c443"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.376631 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-config-data\") pod \"nova-api-0\" (UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " pod="openstack/nova-api-0"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.377544 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2f49f\" (UniqueName: \"kubernetes.io/projected/b1265be9-7097-4d62-aa44-c64c7bf3df52-kube-api-access-2f49f\") pod \"nova-api-0\" (UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " pod="openstack/nova-api-0"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.377830 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " pod="openstack/nova-api-0"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.377874 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-internal-tls-certs\") pod \"nova-api-0\" (UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " pod="openstack/nova-api-0"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.377961 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1265be9-7097-4d62-aa44-c64c7bf3df52-logs\") pod \"nova-api-0\" (UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " pod="openstack/nova-api-0"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.378043 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-public-tls-certs\") pod \"nova-api-0\" (UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " pod="openstack/nova-api-0"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.378078 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" (UID: "8f8ac068-4b8e-4d9f-90af-eb4bcf19c443"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.378384 4857 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.378561 4857 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.378597 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.378612 4857 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.378625 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rjlkv\" (UniqueName: \"kubernetes.io/projected/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-kube-api-access-rjlkv\") on node \"crc\" DevicePath \"\""
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.378638 4857 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.378663 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1265be9-7097-4d62-aa44-c64c7bf3df52-logs\") pod \"nova-api-0\" (UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " pod="openstack/nova-api-0"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.382496 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-public-tls-certs\") pod \"nova-api-0\" (UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " pod="openstack/nova-api-0"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.382667 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-config-data\") pod \"nova-api-0\" (UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " pod="openstack/nova-api-0"
Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.383289 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-combined-ca-bundle\") pod \"nova-api-0\"
(UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " pod="openstack/nova-api-0" Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.393560 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2f49f\" (UniqueName: \"kubernetes.io/projected/b1265be9-7097-4d62-aa44-c64c7bf3df52-kube-api-access-2f49f\") pod \"nova-api-0\" (UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " pod="openstack/nova-api-0" Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.395915 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-internal-tls-certs\") pod \"nova-api-0\" (UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " pod="openstack/nova-api-0" Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.405480 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-config-data" (OuterVolumeSpecName: "config-data") pod "8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" (UID: "8f8ac068-4b8e-4d9f-90af-eb4bcf19c443"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.480194 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.480226 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:08 crc kubenswrapper[4857]: I1128 13:43:08.560010 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.154403 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f8ac068-4b8e-4d9f-90af-eb4bcf19c443","Type":"ContainerDied","Data":"75d2ef7067e3baac71e8adeb213f3c4452ab78a203920b6d5df7688a736af4cc"} Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.154812 4857 scope.go:117] "RemoveContainer" containerID="0d1a68176621baedb73d3db60f74244e357a0d27fc4513ba172924f0ab7c1aa2" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.154965 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.163177 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.203878 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.204305 4857 scope.go:117] "RemoveContainer" containerID="8ae04268da464f2ee7de89ef09584bf5d6d86e180022a6df58565902f0c83921" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.216972 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.226927 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.232086 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.234466 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.234632 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.234798 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.249429 4857 scope.go:117] "RemoveContainer" containerID="de21ec0fca45734ca9b1e00a72ff9590914c3ff1fa1ca16fa7dec64d8b1c0cd8" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.251128 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.297344 4857 scope.go:117] "RemoveContainer" containerID="28c3d4cb3ecd46fc90613fe6166df37aa4f90d2300053b277246858435c0381d" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.303577 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.303609 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-scripts\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.303846 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cc11fd89-0365-46e5-b8b1-48f933611ab9-log-httpd\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.304135 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.304214 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cc11fd89-0365-46e5-b8b1-48f933611ab9-run-httpd\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.304243 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-config-data\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.304332 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.304484 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4h48c\" (UniqueName: \"kubernetes.io/projected/cc11fd89-0365-46e5-b8b1-48f933611ab9-kube-api-access-4h48c\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.406420 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cc11fd89-0365-46e5-b8b1-48f933611ab9-log-httpd\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.406529 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.406563 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cc11fd89-0365-46e5-b8b1-48f933611ab9-run-httpd\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.406580 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-config-data\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.406620 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.406669 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4h48c\" (UniqueName: \"kubernetes.io/projected/cc11fd89-0365-46e5-b8b1-48f933611ab9-kube-api-access-4h48c\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.406715 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.406734 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-scripts\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.407283 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cc11fd89-0365-46e5-b8b1-48f933611ab9-log-httpd\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.408988 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cc11fd89-0365-46e5-b8b1-48f933611ab9-run-httpd\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.415161 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.415354 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-config-data\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.416442 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.416487 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-scripts\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.431900 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4h48c\" (UniqueName: \"kubernetes.io/projected/cc11fd89-0365-46e5-b8b1-48f933611ab9-kube-api-access-4h48c\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.441855 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.555222 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.742903 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:43:09 crc kubenswrapper[4857]: I1128 13:43:09.769065 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.027767 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:43:10 crc kubenswrapper[4857]: W1128 13:43:10.027915 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcc11fd89_0365_46e5_b8b1_48f933611ab9.slice/crio-d30b11297e0ac5eab21bf0d14af080c711877cf97dca7a255c7c61b7980bb439 WatchSource:0}: Error finding container d30b11297e0ac5eab21bf0d14af080c711877cf97dca7a255c7c61b7980bb439: Status 404 returned error can't find the container with id d30b11297e0ac5eab21bf0d14af080c711877cf97dca7a255c7c61b7980bb439 Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.172784 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cc11fd89-0365-46e5-b8b1-48f933611ab9","Type":"ContainerStarted","Data":"d30b11297e0ac5eab21bf0d14af080c711877cf97dca7a255c7c61b7980bb439"} Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.174792 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b1265be9-7097-4d62-aa44-c64c7bf3df52","Type":"ContainerStarted","Data":"25b990ff85ae73e5ffbc8ae0fd4bcc1c0c6610da4c54dd6bf6d6722b872dbe3a"} Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.174840 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b1265be9-7097-4d62-aa44-c64c7bf3df52","Type":"ContainerStarted","Data":"00b8ca7a75dc677dcdb19febab64976b8c725b5cb1eddce840a03a18cc529897"} Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.174855 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b1265be9-7097-4d62-aa44-c64c7bf3df52","Type":"ContainerStarted","Data":"6e4fb43cb8f553d9df735cd82300f61b64493b077d387af84bae94c0dad0dc0f"} Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.203923 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.283255 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.283236767 podStartE2EDuration="2.283236767s" podCreationTimestamp="2025-11-28 13:43:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:43:10.20125414 +0000 UTC m=+1482.228629387" watchObservedRunningTime="2025-11-28 13:43:10.283236767 +0000 UTC m=+1482.310611934" Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.349302 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f8ac068-4b8e-4d9f-90af-eb4bcf19c443" path="/var/lib/kubelet/pods/8f8ac068-4b8e-4d9f-90af-eb4bcf19c443/volumes" Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.437713 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-dx7fm"] Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.439217 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-dx7fm" Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.441145 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.441352 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.446252 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-dx7fm"] Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.587978 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-dx7fm\" (UID: \"10b0ab88-8db9-4c9d-bc03-a9da374a33ca\") " pod="openstack/nova-cell1-cell-mapping-dx7fm" Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.588346 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-config-data\") pod \"nova-cell1-cell-mapping-dx7fm\" (UID: \"10b0ab88-8db9-4c9d-bc03-a9da374a33ca\") " pod="openstack/nova-cell1-cell-mapping-dx7fm" Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.588386 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-scripts\") pod \"nova-cell1-cell-mapping-dx7fm\" (UID: \"10b0ab88-8db9-4c9d-bc03-a9da374a33ca\") " pod="openstack/nova-cell1-cell-mapping-dx7fm" Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.588455 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jwnb\" (UniqueName: \"kubernetes.io/projected/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-kube-api-access-4jwnb\") pod \"nova-cell1-cell-mapping-dx7fm\" (UID: \"10b0ab88-8db9-4c9d-bc03-a9da374a33ca\") " pod="openstack/nova-cell1-cell-mapping-dx7fm" Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.690242 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-dx7fm\" (UID: \"10b0ab88-8db9-4c9d-bc03-a9da374a33ca\") " pod="openstack/nova-cell1-cell-mapping-dx7fm" Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.690326 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-config-data\") pod \"nova-cell1-cell-mapping-dx7fm\" (UID: \"10b0ab88-8db9-4c9d-bc03-a9da374a33ca\") " pod="openstack/nova-cell1-cell-mapping-dx7fm" Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.690374 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-scripts\") pod \"nova-cell1-cell-mapping-dx7fm\" (UID: \"10b0ab88-8db9-4c9d-bc03-a9da374a33ca\") " pod="openstack/nova-cell1-cell-mapping-dx7fm" Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.690566 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jwnb\" (UniqueName: 
\"kubernetes.io/projected/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-kube-api-access-4jwnb\") pod \"nova-cell1-cell-mapping-dx7fm\" (UID: \"10b0ab88-8db9-4c9d-bc03-a9da374a33ca\") " pod="openstack/nova-cell1-cell-mapping-dx7fm" Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.694509 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-scripts\") pod \"nova-cell1-cell-mapping-dx7fm\" (UID: \"10b0ab88-8db9-4c9d-bc03-a9da374a33ca\") " pod="openstack/nova-cell1-cell-mapping-dx7fm" Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.695078 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-dx7fm\" (UID: \"10b0ab88-8db9-4c9d-bc03-a9da374a33ca\") " pod="openstack/nova-cell1-cell-mapping-dx7fm" Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.698228 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-config-data\") pod \"nova-cell1-cell-mapping-dx7fm\" (UID: \"10b0ab88-8db9-4c9d-bc03-a9da374a33ca\") " pod="openstack/nova-cell1-cell-mapping-dx7fm" Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.707961 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jwnb\" (UniqueName: \"kubernetes.io/projected/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-kube-api-access-4jwnb\") pod \"nova-cell1-cell-mapping-dx7fm\" (UID: \"10b0ab88-8db9-4c9d-bc03-a9da374a33ca\") " pod="openstack/nova-cell1-cell-mapping-dx7fm" Nov 28 13:43:10 crc kubenswrapper[4857]: I1128 13:43:10.771329 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-dx7fm" Nov 28 13:43:11 crc kubenswrapper[4857]: I1128 13:43:11.188449 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cc11fd89-0365-46e5-b8b1-48f933611ab9","Type":"ContainerStarted","Data":"3e988a9d71b894b528cad9cf749fa687e397d909171f243b57b66253d5c4fcf4"} Nov 28 13:43:11 crc kubenswrapper[4857]: I1128 13:43:11.224532 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-dx7fm"] Nov 28 13:43:11 crc kubenswrapper[4857]: W1128 13:43:11.226523 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod10b0ab88_8db9_4c9d_bc03_a9da374a33ca.slice/crio-0d6403853444b94db4c6a59518a2399d76eff0cd5eb98d184301e3f13b48d8e3 WatchSource:0}: Error finding container 0d6403853444b94db4c6a59518a2399d76eff0cd5eb98d184301e3f13b48d8e3: Status 404 returned error can't find the container with id 0d6403853444b94db4c6a59518a2399d76eff0cd5eb98d184301e3f13b48d8e3 Nov 28 13:43:11 crc kubenswrapper[4857]: I1128 13:43:11.642967 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k" Nov 28 13:43:11 crc kubenswrapper[4857]: I1128 13:43:11.738613 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-4pk9b"] Nov 28 13:43:11 crc kubenswrapper[4857]: I1128 13:43:11.738879 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-bccf8f775-4pk9b" podUID="78b07ef1-c929-45b8-b3e2-f0370c174054" containerName="dnsmasq-dns" containerID="cri-o://63eb95ec2e69722a2f6c90db8f9801bde76c0087e6b2a2cbcd28e91a38b5eb2d" gracePeriod=10 Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.211523 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cc11fd89-0365-46e5-b8b1-48f933611ab9","Type":"ContainerStarted","Data":"bcfc962c58adc4335b9f934de14d9b2330cea19877b855df32060d57a9431c59"} Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.221427 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-dx7fm" event={"ID":"10b0ab88-8db9-4c9d-bc03-a9da374a33ca","Type":"ContainerStarted","Data":"64b64c3a1efcb9350b410451f1a084657b11967bf78371a018aa5e7c9647b4ed"} Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.221477 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-dx7fm" event={"ID":"10b0ab88-8db9-4c9d-bc03-a9da374a33ca","Type":"ContainerStarted","Data":"0d6403853444b94db4c6a59518a2399d76eff0cd5eb98d184301e3f13b48d8e3"} Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.230529 4857 generic.go:334] "Generic (PLEG): container finished" podID="78b07ef1-c929-45b8-b3e2-f0370c174054" containerID="63eb95ec2e69722a2f6c90db8f9801bde76c0087e6b2a2cbcd28e91a38b5eb2d" exitCode=0 Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.230575 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-4pk9b" event={"ID":"78b07ef1-c929-45b8-b3e2-f0370c174054","Type":"ContainerDied","Data":"63eb95ec2e69722a2f6c90db8f9801bde76c0087e6b2a2cbcd28e91a38b5eb2d"} Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.254581 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-dx7fm" podStartSLOduration=2.254555246 podStartE2EDuration="2.254555246s" 
podCreationTimestamp="2025-11-28 13:43:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:43:12.24149281 +0000 UTC m=+1484.268867977" watchObservedRunningTime="2025-11-28 13:43:12.254555246 +0000 UTC m=+1484.281930413" Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.430288 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-4pk9b" Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.574952 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-dns-svc\") pod \"78b07ef1-c929-45b8-b3e2-f0370c174054\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.575068 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-config\") pod \"78b07ef1-c929-45b8-b3e2-f0370c174054\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.575105 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-ovsdbserver-sb\") pod \"78b07ef1-c929-45b8-b3e2-f0370c174054\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.575134 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-ovsdbserver-nb\") pod \"78b07ef1-c929-45b8-b3e2-f0370c174054\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.575185 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jl5pp\" (UniqueName: \"kubernetes.io/projected/78b07ef1-c929-45b8-b3e2-f0370c174054-kube-api-access-jl5pp\") pod \"78b07ef1-c929-45b8-b3e2-f0370c174054\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.575222 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-dns-swift-storage-0\") pod \"78b07ef1-c929-45b8-b3e2-f0370c174054\" (UID: \"78b07ef1-c929-45b8-b3e2-f0370c174054\") " Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.580102 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78b07ef1-c929-45b8-b3e2-f0370c174054-kube-api-access-jl5pp" (OuterVolumeSpecName: "kube-api-access-jl5pp") pod "78b07ef1-c929-45b8-b3e2-f0370c174054" (UID: "78b07ef1-c929-45b8-b3e2-f0370c174054"). InnerVolumeSpecName "kube-api-access-jl5pp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.631728 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "78b07ef1-c929-45b8-b3e2-f0370c174054" (UID: "78b07ef1-c929-45b8-b3e2-f0370c174054"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.636533 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "78b07ef1-c929-45b8-b3e2-f0370c174054" (UID: "78b07ef1-c929-45b8-b3e2-f0370c174054"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.649600 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "78b07ef1-c929-45b8-b3e2-f0370c174054" (UID: "78b07ef1-c929-45b8-b3e2-f0370c174054"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.659185 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-config" (OuterVolumeSpecName: "config") pod "78b07ef1-c929-45b8-b3e2-f0370c174054" (UID: "78b07ef1-c929-45b8-b3e2-f0370c174054"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.672236 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "78b07ef1-c929-45b8-b3e2-f0370c174054" (UID: "78b07ef1-c929-45b8-b3e2-f0370c174054"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.679437 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.679470 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.679480 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.679498 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.679514 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jl5pp\" (UniqueName: \"kubernetes.io/projected/78b07ef1-c929-45b8-b3e2-f0370c174054-kube-api-access-jl5pp\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.679524 4857 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/78b07ef1-c929-45b8-b3e2-f0370c174054-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:13 crc kubenswrapper[4857]: I1128 13:43:13.254281 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-4pk9b" 
event={"ID":"78b07ef1-c929-45b8-b3e2-f0370c174054","Type":"ContainerDied","Data":"724cb9f647b1a9a098415a89d864d320a373bb3708cf69805af30d05332651c1"} Nov 28 13:43:13 crc kubenswrapper[4857]: I1128 13:43:13.254680 4857 scope.go:117] "RemoveContainer" containerID="63eb95ec2e69722a2f6c90db8f9801bde76c0087e6b2a2cbcd28e91a38b5eb2d" Nov 28 13:43:13 crc kubenswrapper[4857]: I1128 13:43:13.254907 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-4pk9b" Nov 28 13:43:13 crc kubenswrapper[4857]: I1128 13:43:13.267917 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cc11fd89-0365-46e5-b8b1-48f933611ab9","Type":"ContainerStarted","Data":"53c5151a4983e3c03ad2115ba0190cda5364aa8956976486e4a3dda5c19894bc"} Nov 28 13:43:13 crc kubenswrapper[4857]: I1128 13:43:13.292964 4857 scope.go:117] "RemoveContainer" containerID="a43911419dc796745dcba6232a2a82896bc82cd1ff6a08213d38500e17c66c95" Nov 28 13:43:13 crc kubenswrapper[4857]: I1128 13:43:13.319874 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-4pk9b"] Nov 28 13:43:13 crc kubenswrapper[4857]: I1128 13:43:13.329946 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-4pk9b"] Nov 28 13:43:14 crc kubenswrapper[4857]: I1128 13:43:14.280826 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cc11fd89-0365-46e5-b8b1-48f933611ab9","Type":"ContainerStarted","Data":"758611f27d908e2a9d4f2cb15d9c474f4f04bb2c788bba7c25fe962588bee8ea"} Nov 28 13:43:14 crc kubenswrapper[4857]: I1128 13:43:14.281262 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 13:43:14 crc kubenswrapper[4857]: I1128 13:43:14.335908 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78b07ef1-c929-45b8-b3e2-f0370c174054" path="/var/lib/kubelet/pods/78b07ef1-c929-45b8-b3e2-f0370c174054/volumes" Nov 28 13:43:17 crc kubenswrapper[4857]: I1128 13:43:17.310656 4857 generic.go:334] "Generic (PLEG): container finished" podID="10b0ab88-8db9-4c9d-bc03-a9da374a33ca" containerID="64b64c3a1efcb9350b410451f1a084657b11967bf78371a018aa5e7c9647b4ed" exitCode=0 Nov 28 13:43:17 crc kubenswrapper[4857]: I1128 13:43:17.310726 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-dx7fm" event={"ID":"10b0ab88-8db9-4c9d-bc03-a9da374a33ca","Type":"ContainerDied","Data":"64b64c3a1efcb9350b410451f1a084657b11967bf78371a018aa5e7c9647b4ed"} Nov 28 13:43:17 crc kubenswrapper[4857]: I1128 13:43:17.329453 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=4.602706227 podStartE2EDuration="8.329437641s" podCreationTimestamp="2025-11-28 13:43:09 +0000 UTC" firstStartedPulling="2025-11-28 13:43:10.030324773 +0000 UTC m=+1482.057699950" lastFinishedPulling="2025-11-28 13:43:13.757056157 +0000 UTC m=+1485.784431364" observedRunningTime="2025-11-28 13:43:14.316919432 +0000 UTC m=+1486.344294629" watchObservedRunningTime="2025-11-28 13:43:17.329437641 +0000 UTC m=+1489.356812808" Nov 28 13:43:18 crc kubenswrapper[4857]: I1128 13:43:18.561697 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 13:43:18 crc kubenswrapper[4857]: I1128 13:43:18.562059 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 
13:43:18 crc kubenswrapper[4857]: I1128 13:43:18.741199 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-dx7fm" Nov 28 13:43:18 crc kubenswrapper[4857]: I1128 13:43:18.786234 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4jwnb\" (UniqueName: \"kubernetes.io/projected/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-kube-api-access-4jwnb\") pod \"10b0ab88-8db9-4c9d-bc03-a9da374a33ca\" (UID: \"10b0ab88-8db9-4c9d-bc03-a9da374a33ca\") " Nov 28 13:43:18 crc kubenswrapper[4857]: I1128 13:43:18.786292 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-combined-ca-bundle\") pod \"10b0ab88-8db9-4c9d-bc03-a9da374a33ca\" (UID: \"10b0ab88-8db9-4c9d-bc03-a9da374a33ca\") " Nov 28 13:43:18 crc kubenswrapper[4857]: I1128 13:43:18.786326 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-scripts\") pod \"10b0ab88-8db9-4c9d-bc03-a9da374a33ca\" (UID: \"10b0ab88-8db9-4c9d-bc03-a9da374a33ca\") " Nov 28 13:43:18 crc kubenswrapper[4857]: I1128 13:43:18.786419 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-config-data\") pod \"10b0ab88-8db9-4c9d-bc03-a9da374a33ca\" (UID: \"10b0ab88-8db9-4c9d-bc03-a9da374a33ca\") " Nov 28 13:43:18 crc kubenswrapper[4857]: I1128 13:43:18.793188 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-kube-api-access-4jwnb" (OuterVolumeSpecName: "kube-api-access-4jwnb") pod "10b0ab88-8db9-4c9d-bc03-a9da374a33ca" (UID: "10b0ab88-8db9-4c9d-bc03-a9da374a33ca"). InnerVolumeSpecName "kube-api-access-4jwnb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:43:18 crc kubenswrapper[4857]: I1128 13:43:18.793273 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-scripts" (OuterVolumeSpecName: "scripts") pod "10b0ab88-8db9-4c9d-bc03-a9da374a33ca" (UID: "10b0ab88-8db9-4c9d-bc03-a9da374a33ca"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:43:18 crc kubenswrapper[4857]: I1128 13:43:18.826050 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "10b0ab88-8db9-4c9d-bc03-a9da374a33ca" (UID: "10b0ab88-8db9-4c9d-bc03-a9da374a33ca"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:43:18 crc kubenswrapper[4857]: I1128 13:43:18.833588 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-config-data" (OuterVolumeSpecName: "config-data") pod "10b0ab88-8db9-4c9d-bc03-a9da374a33ca" (UID: "10b0ab88-8db9-4c9d-bc03-a9da374a33ca"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:43:18 crc kubenswrapper[4857]: I1128 13:43:18.888701 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4jwnb\" (UniqueName: \"kubernetes.io/projected/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-kube-api-access-4jwnb\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:18 crc kubenswrapper[4857]: I1128 13:43:18.888734 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:18 crc kubenswrapper[4857]: I1128 13:43:18.888744 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:18 crc kubenswrapper[4857]: I1128 13:43:18.888789 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10b0ab88-8db9-4c9d-bc03-a9da374a33ca-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:19 crc kubenswrapper[4857]: I1128 13:43:19.336047 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-dx7fm" event={"ID":"10b0ab88-8db9-4c9d-bc03-a9da374a33ca","Type":"ContainerDied","Data":"0d6403853444b94db4c6a59518a2399d76eff0cd5eb98d184301e3f13b48d8e3"} Nov 28 13:43:19 crc kubenswrapper[4857]: I1128 13:43:19.336107 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-dx7fm" Nov 28 13:43:19 crc kubenswrapper[4857]: I1128 13:43:19.336404 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0d6403853444b94db4c6a59518a2399d76eff0cd5eb98d184301e3f13b48d8e3" Nov 28 13:43:19 crc kubenswrapper[4857]: I1128 13:43:19.517741 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:43:19 crc kubenswrapper[4857]: I1128 13:43:19.518006 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b1265be9-7097-4d62-aa44-c64c7bf3df52" containerName="nova-api-log" containerID="cri-o://00b8ca7a75dc677dcdb19febab64976b8c725b5cb1eddce840a03a18cc529897" gracePeriod=30 Nov 28 13:43:19 crc kubenswrapper[4857]: I1128 13:43:19.518047 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b1265be9-7097-4d62-aa44-c64c7bf3df52" containerName="nova-api-api" containerID="cri-o://25b990ff85ae73e5ffbc8ae0fd4bcc1c0c6610da4c54dd6bf6d6722b872dbe3a" gracePeriod=30 Nov 28 13:43:19 crc kubenswrapper[4857]: I1128 13:43:19.524700 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b1265be9-7097-4d62-aa44-c64c7bf3df52" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.197:8774/\": EOF" Nov 28 13:43:19 crc kubenswrapper[4857]: I1128 13:43:19.524855 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b1265be9-7097-4d62-aa44-c64c7bf3df52" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.197:8774/\": EOF" Nov 28 13:43:19 crc kubenswrapper[4857]: I1128 13:43:19.617946 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:43:19 crc kubenswrapper[4857]: I1128 13:43:19.618191 4857 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/nova-scheduler-0" podUID="4643753a-aa92-4d3e-a95e-5124d7792edb" containerName="nova-scheduler-scheduler" containerID="cri-o://6cc050e6717812d5a9cb105606616c7aa8b61278288e1e4f8fb0f37c96bbe529" gracePeriod=30 Nov 28 13:43:19 crc kubenswrapper[4857]: I1128 13:43:19.665210 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:43:19 crc kubenswrapper[4857]: I1128 13:43:19.665438 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="b57e1273-601c-4338-90bd-9047ae46b65a" containerName="nova-metadata-log" containerID="cri-o://849f1ffdfd0189980913d9c84417eeb4cc7cf8a96da7546b425510a43946f590" gracePeriod=30 Nov 28 13:43:19 crc kubenswrapper[4857]: I1128 13:43:19.665474 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="b57e1273-601c-4338-90bd-9047ae46b65a" containerName="nova-metadata-metadata" containerID="cri-o://f40ddd142fa37048e48987bebb61612d533528aba55010e42f4d8ad33abba695" gracePeriod=30 Nov 28 13:43:20 crc kubenswrapper[4857]: I1128 13:43:20.493057 4857 generic.go:334] "Generic (PLEG): container finished" podID="b57e1273-601c-4338-90bd-9047ae46b65a" containerID="849f1ffdfd0189980913d9c84417eeb4cc7cf8a96da7546b425510a43946f590" exitCode=143 Nov 28 13:43:20 crc kubenswrapper[4857]: I1128 13:43:20.493183 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b57e1273-601c-4338-90bd-9047ae46b65a","Type":"ContainerDied","Data":"849f1ffdfd0189980913d9c84417eeb4cc7cf8a96da7546b425510a43946f590"} Nov 28 13:43:20 crc kubenswrapper[4857]: I1128 13:43:20.566983 4857 generic.go:334] "Generic (PLEG): container finished" podID="b1265be9-7097-4d62-aa44-c64c7bf3df52" containerID="00b8ca7a75dc677dcdb19febab64976b8c725b5cb1eddce840a03a18cc529897" exitCode=143 Nov 28 13:43:20 crc kubenswrapper[4857]: I1128 13:43:20.567026 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b1265be9-7097-4d62-aa44-c64c7bf3df52","Type":"ContainerDied","Data":"00b8ca7a75dc677dcdb19febab64976b8c725b5cb1eddce840a03a18cc529897"} Nov 28 13:43:20 crc kubenswrapper[4857]: I1128 13:43:20.995696 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.199780 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4643753a-aa92-4d3e-a95e-5124d7792edb-config-data\") pod \"4643753a-aa92-4d3e-a95e-5124d7792edb\" (UID: \"4643753a-aa92-4d3e-a95e-5124d7792edb\") " Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.199955 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4643753a-aa92-4d3e-a95e-5124d7792edb-combined-ca-bundle\") pod \"4643753a-aa92-4d3e-a95e-5124d7792edb\" (UID: \"4643753a-aa92-4d3e-a95e-5124d7792edb\") " Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.199984 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8hkpc\" (UniqueName: \"kubernetes.io/projected/4643753a-aa92-4d3e-a95e-5124d7792edb-kube-api-access-8hkpc\") pod \"4643753a-aa92-4d3e-a95e-5124d7792edb\" (UID: \"4643753a-aa92-4d3e-a95e-5124d7792edb\") " Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.212007 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4643753a-aa92-4d3e-a95e-5124d7792edb-kube-api-access-8hkpc" (OuterVolumeSpecName: "kube-api-access-8hkpc") pod "4643753a-aa92-4d3e-a95e-5124d7792edb" (UID: "4643753a-aa92-4d3e-a95e-5124d7792edb"). InnerVolumeSpecName "kube-api-access-8hkpc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.228438 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4643753a-aa92-4d3e-a95e-5124d7792edb-config-data" (OuterVolumeSpecName: "config-data") pod "4643753a-aa92-4d3e-a95e-5124d7792edb" (UID: "4643753a-aa92-4d3e-a95e-5124d7792edb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.243006 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4643753a-aa92-4d3e-a95e-5124d7792edb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4643753a-aa92-4d3e-a95e-5124d7792edb" (UID: "4643753a-aa92-4d3e-a95e-5124d7792edb"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.302607 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4643753a-aa92-4d3e-a95e-5124d7792edb-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.302643 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8hkpc\" (UniqueName: \"kubernetes.io/projected/4643753a-aa92-4d3e-a95e-5124d7792edb-kube-api-access-8hkpc\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.302662 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4643753a-aa92-4d3e-a95e-5124d7792edb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.581404 4857 generic.go:334] "Generic (PLEG): container finished" podID="4643753a-aa92-4d3e-a95e-5124d7792edb" containerID="6cc050e6717812d5a9cb105606616c7aa8b61278288e1e4f8fb0f37c96bbe529" exitCode=0 Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.581452 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4643753a-aa92-4d3e-a95e-5124d7792edb","Type":"ContainerDied","Data":"6cc050e6717812d5a9cb105606616c7aa8b61278288e1e4f8fb0f37c96bbe529"} Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.581488 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4643753a-aa92-4d3e-a95e-5124d7792edb","Type":"ContainerDied","Data":"93850b54570d72c4714bd09e58fab986675a7c4c3300366fb43b0c93cde9a1d0"} Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.581510 4857 scope.go:117] "RemoveContainer" containerID="6cc050e6717812d5a9cb105606616c7aa8b61278288e1e4f8fb0f37c96bbe529" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.581525 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.642821 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.649021 4857 scope.go:117] "RemoveContainer" containerID="6cc050e6717812d5a9cb105606616c7aa8b61278288e1e4f8fb0f37c96bbe529" Nov 28 13:43:21 crc kubenswrapper[4857]: E1128 13:43:21.651047 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6cc050e6717812d5a9cb105606616c7aa8b61278288e1e4f8fb0f37c96bbe529\": container with ID starting with 6cc050e6717812d5a9cb105606616c7aa8b61278288e1e4f8fb0f37c96bbe529 not found: ID does not exist" containerID="6cc050e6717812d5a9cb105606616c7aa8b61278288e1e4f8fb0f37c96bbe529" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.651097 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6cc050e6717812d5a9cb105606616c7aa8b61278288e1e4f8fb0f37c96bbe529"} err="failed to get container status \"6cc050e6717812d5a9cb105606616c7aa8b61278288e1e4f8fb0f37c96bbe529\": rpc error: code = NotFound desc = could not find container \"6cc050e6717812d5a9cb105606616c7aa8b61278288e1e4f8fb0f37c96bbe529\": container with ID starting with 6cc050e6717812d5a9cb105606616c7aa8b61278288e1e4f8fb0f37c96bbe529 not found: ID does not exist" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.661160 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.668856 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:43:21 crc kubenswrapper[4857]: E1128 13:43:21.669690 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4643753a-aa92-4d3e-a95e-5124d7792edb" containerName="nova-scheduler-scheduler" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.669736 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4643753a-aa92-4d3e-a95e-5124d7792edb" containerName="nova-scheduler-scheduler" Nov 28 13:43:21 crc kubenswrapper[4857]: E1128 13:43:21.669828 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78b07ef1-c929-45b8-b3e2-f0370c174054" containerName="init" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.669838 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="78b07ef1-c929-45b8-b3e2-f0370c174054" containerName="init" Nov 28 13:43:21 crc kubenswrapper[4857]: E1128 13:43:21.669852 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10b0ab88-8db9-4c9d-bc03-a9da374a33ca" containerName="nova-manage" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.669858 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="10b0ab88-8db9-4c9d-bc03-a9da374a33ca" containerName="nova-manage" Nov 28 13:43:21 crc kubenswrapper[4857]: E1128 13:43:21.669873 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78b07ef1-c929-45b8-b3e2-f0370c174054" containerName="dnsmasq-dns" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.669900 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="78b07ef1-c929-45b8-b3e2-f0370c174054" containerName="dnsmasq-dns" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.670183 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="78b07ef1-c929-45b8-b3e2-f0370c174054" containerName="dnsmasq-dns" Nov 28 13:43:21 crc 
kubenswrapper[4857]: I1128 13:43:21.670222 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="10b0ab88-8db9-4c9d-bc03-a9da374a33ca" containerName="nova-manage" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.670235 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="4643753a-aa92-4d3e-a95e-5124d7792edb" containerName="nova-scheduler-scheduler" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.671556 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.674941 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.683979 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.723131 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/310b8699-5d0c-4cce-b8fd-90ccedc2ce85-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"310b8699-5d0c-4cce-b8fd-90ccedc2ce85\") " pod="openstack/nova-scheduler-0" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.723202 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/310b8699-5d0c-4cce-b8fd-90ccedc2ce85-config-data\") pod \"nova-scheduler-0\" (UID: \"310b8699-5d0c-4cce-b8fd-90ccedc2ce85\") " pod="openstack/nova-scheduler-0" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.723266 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rdhz\" (UniqueName: \"kubernetes.io/projected/310b8699-5d0c-4cce-b8fd-90ccedc2ce85-kube-api-access-5rdhz\") pod \"nova-scheduler-0\" (UID: \"310b8699-5d0c-4cce-b8fd-90ccedc2ce85\") " pod="openstack/nova-scheduler-0" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.824397 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rdhz\" (UniqueName: \"kubernetes.io/projected/310b8699-5d0c-4cce-b8fd-90ccedc2ce85-kube-api-access-5rdhz\") pod \"nova-scheduler-0\" (UID: \"310b8699-5d0c-4cce-b8fd-90ccedc2ce85\") " pod="openstack/nova-scheduler-0" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.824495 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/310b8699-5d0c-4cce-b8fd-90ccedc2ce85-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"310b8699-5d0c-4cce-b8fd-90ccedc2ce85\") " pod="openstack/nova-scheduler-0" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.824542 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/310b8699-5d0c-4cce-b8fd-90ccedc2ce85-config-data\") pod \"nova-scheduler-0\" (UID: \"310b8699-5d0c-4cce-b8fd-90ccedc2ce85\") " pod="openstack/nova-scheduler-0" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.828400 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/310b8699-5d0c-4cce-b8fd-90ccedc2ce85-config-data\") pod \"nova-scheduler-0\" (UID: \"310b8699-5d0c-4cce-b8fd-90ccedc2ce85\") " pod="openstack/nova-scheduler-0" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 
13:43:21.832449 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/310b8699-5d0c-4cce-b8fd-90ccedc2ce85-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"310b8699-5d0c-4cce-b8fd-90ccedc2ce85\") " pod="openstack/nova-scheduler-0" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.842447 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rdhz\" (UniqueName: \"kubernetes.io/projected/310b8699-5d0c-4cce-b8fd-90ccedc2ce85-kube-api-access-5rdhz\") pod \"nova-scheduler-0\" (UID: \"310b8699-5d0c-4cce-b8fd-90ccedc2ce85\") " pod="openstack/nova-scheduler-0" Nov 28 13:43:22 crc kubenswrapper[4857]: I1128 13:43:21.999983 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 13:43:22 crc kubenswrapper[4857]: I1128 13:43:22.325373 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4643753a-aa92-4d3e-a95e-5124d7792edb" path="/var/lib/kubelet/pods/4643753a-aa92-4d3e-a95e-5124d7792edb/volumes" Nov 28 13:43:22 crc kubenswrapper[4857]: I1128 13:43:22.550569 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:43:22 crc kubenswrapper[4857]: I1128 13:43:22.608254 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"310b8699-5d0c-4cce-b8fd-90ccedc2ce85","Type":"ContainerStarted","Data":"27e13ad600a897de0257f4229b70eca91bf43c65a3f94ca9f4d18dab7e7b5c08"} Nov 28 13:43:22 crc kubenswrapper[4857]: I1128 13:43:22.797632 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="b57e1273-601c-4338-90bd-9047ae46b65a" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.190:8775/\": read tcp 10.217.0.2:34688->10.217.0.190:8775: read: connection reset by peer" Nov 28 13:43:22 crc kubenswrapper[4857]: I1128 13:43:22.797644 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="b57e1273-601c-4338-90bd-9047ae46b65a" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.190:8775/\": read tcp 10.217.0.2:34696->10.217.0.190:8775: read: connection reset by peer" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.329624 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.465151 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6jbdt\" (UniqueName: \"kubernetes.io/projected/b57e1273-601c-4338-90bd-9047ae46b65a-kube-api-access-6jbdt\") pod \"b57e1273-601c-4338-90bd-9047ae46b65a\" (UID: \"b57e1273-601c-4338-90bd-9047ae46b65a\") " Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.465520 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b57e1273-601c-4338-90bd-9047ae46b65a-nova-metadata-tls-certs\") pod \"b57e1273-601c-4338-90bd-9047ae46b65a\" (UID: \"b57e1273-601c-4338-90bd-9047ae46b65a\") " Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.465649 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b57e1273-601c-4338-90bd-9047ae46b65a-combined-ca-bundle\") pod \"b57e1273-601c-4338-90bd-9047ae46b65a\" (UID: \"b57e1273-601c-4338-90bd-9047ae46b65a\") " Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.465717 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b57e1273-601c-4338-90bd-9047ae46b65a-config-data\") pod \"b57e1273-601c-4338-90bd-9047ae46b65a\" (UID: \"b57e1273-601c-4338-90bd-9047ae46b65a\") " Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.465764 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b57e1273-601c-4338-90bd-9047ae46b65a-logs\") pod \"b57e1273-601c-4338-90bd-9047ae46b65a\" (UID: \"b57e1273-601c-4338-90bd-9047ae46b65a\") " Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.469026 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b57e1273-601c-4338-90bd-9047ae46b65a-logs" (OuterVolumeSpecName: "logs") pod "b57e1273-601c-4338-90bd-9047ae46b65a" (UID: "b57e1273-601c-4338-90bd-9047ae46b65a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.473000 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b57e1273-601c-4338-90bd-9047ae46b65a-kube-api-access-6jbdt" (OuterVolumeSpecName: "kube-api-access-6jbdt") pod "b57e1273-601c-4338-90bd-9047ae46b65a" (UID: "b57e1273-601c-4338-90bd-9047ae46b65a"). InnerVolumeSpecName "kube-api-access-6jbdt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.510429 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b57e1273-601c-4338-90bd-9047ae46b65a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b57e1273-601c-4338-90bd-9047ae46b65a" (UID: "b57e1273-601c-4338-90bd-9047ae46b65a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.512188 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b57e1273-601c-4338-90bd-9047ae46b65a-config-data" (OuterVolumeSpecName: "config-data") pod "b57e1273-601c-4338-90bd-9047ae46b65a" (UID: "b57e1273-601c-4338-90bd-9047ae46b65a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.527408 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b57e1273-601c-4338-90bd-9047ae46b65a-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "b57e1273-601c-4338-90bd-9047ae46b65a" (UID: "b57e1273-601c-4338-90bd-9047ae46b65a"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.568365 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b57e1273-601c-4338-90bd-9047ae46b65a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.568402 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b57e1273-601c-4338-90bd-9047ae46b65a-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.568416 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6jbdt\" (UniqueName: \"kubernetes.io/projected/b57e1273-601c-4338-90bd-9047ae46b65a-kube-api-access-6jbdt\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.568426 4857 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b57e1273-601c-4338-90bd-9047ae46b65a-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.568436 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b57e1273-601c-4338-90bd-9047ae46b65a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.630847 4857 generic.go:334] "Generic (PLEG): container finished" podID="b57e1273-601c-4338-90bd-9047ae46b65a" containerID="f40ddd142fa37048e48987bebb61612d533528aba55010e42f4d8ad33abba695" exitCode=0 Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.630920 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b57e1273-601c-4338-90bd-9047ae46b65a","Type":"ContainerDied","Data":"f40ddd142fa37048e48987bebb61612d533528aba55010e42f4d8ad33abba695"} Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.630952 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b57e1273-601c-4338-90bd-9047ae46b65a","Type":"ContainerDied","Data":"54b0763f47902ee147e8b9752b435c3575ae599eb60befbd8a952c495c1effaf"} Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.630973 4857 scope.go:117] "RemoveContainer" containerID="f40ddd142fa37048e48987bebb61612d533528aba55010e42f4d8ad33abba695" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.631088 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.634457 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"310b8699-5d0c-4cce-b8fd-90ccedc2ce85","Type":"ContainerStarted","Data":"8b4e711ca231fe7452ef2052a4b12b122b8a749ee4670a4b691165e5f548e102"} Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.656956 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.656925996 podStartE2EDuration="2.656925996s" podCreationTimestamp="2025-11-28 13:43:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:43:23.650913854 +0000 UTC m=+1495.678289021" watchObservedRunningTime="2025-11-28 13:43:23.656925996 +0000 UTC m=+1495.684301163" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.683720 4857 scope.go:117] "RemoveContainer" containerID="849f1ffdfd0189980913d9c84417eeb4cc7cf8a96da7546b425510a43946f590" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.698622 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.721319 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.734994 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:43:23 crc kubenswrapper[4857]: E1128 13:43:23.736031 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b57e1273-601c-4338-90bd-9047ae46b65a" containerName="nova-metadata-metadata" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.736057 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="b57e1273-601c-4338-90bd-9047ae46b65a" containerName="nova-metadata-metadata" Nov 28 13:43:23 crc kubenswrapper[4857]: E1128 13:43:23.736117 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b57e1273-601c-4338-90bd-9047ae46b65a" containerName="nova-metadata-log" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.736127 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="b57e1273-601c-4338-90bd-9047ae46b65a" containerName="nova-metadata-log" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.736509 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="b57e1273-601c-4338-90bd-9047ae46b65a" containerName="nova-metadata-log" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.736528 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="b57e1273-601c-4338-90bd-9047ae46b65a" containerName="nova-metadata-metadata" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.738050 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.739016 4857 scope.go:117] "RemoveContainer" containerID="f40ddd142fa37048e48987bebb61612d533528aba55010e42f4d8ad33abba695" Nov 28 13:43:23 crc kubenswrapper[4857]: E1128 13:43:23.740248 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f40ddd142fa37048e48987bebb61612d533528aba55010e42f4d8ad33abba695\": container with ID starting with f40ddd142fa37048e48987bebb61612d533528aba55010e42f4d8ad33abba695 not found: ID does not exist" containerID="f40ddd142fa37048e48987bebb61612d533528aba55010e42f4d8ad33abba695" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.740286 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f40ddd142fa37048e48987bebb61612d533528aba55010e42f4d8ad33abba695"} err="failed to get container status \"f40ddd142fa37048e48987bebb61612d533528aba55010e42f4d8ad33abba695\": rpc error: code = NotFound desc = could not find container \"f40ddd142fa37048e48987bebb61612d533528aba55010e42f4d8ad33abba695\": container with ID starting with f40ddd142fa37048e48987bebb61612d533528aba55010e42f4d8ad33abba695 not found: ID does not exist" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.740312 4857 scope.go:117] "RemoveContainer" containerID="849f1ffdfd0189980913d9c84417eeb4cc7cf8a96da7546b425510a43946f590" Nov 28 13:43:23 crc kubenswrapper[4857]: E1128 13:43:23.740897 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"849f1ffdfd0189980913d9c84417eeb4cc7cf8a96da7546b425510a43946f590\": container with ID starting with 849f1ffdfd0189980913d9c84417eeb4cc7cf8a96da7546b425510a43946f590 not found: ID does not exist" containerID="849f1ffdfd0189980913d9c84417eeb4cc7cf8a96da7546b425510a43946f590" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.740932 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"849f1ffdfd0189980913d9c84417eeb4cc7cf8a96da7546b425510a43946f590"} err="failed to get container status \"849f1ffdfd0189980913d9c84417eeb4cc7cf8a96da7546b425510a43946f590\": rpc error: code = NotFound desc = could not find container \"849f1ffdfd0189980913d9c84417eeb4cc7cf8a96da7546b425510a43946f590\": container with ID starting with 849f1ffdfd0189980913d9c84417eeb4cc7cf8a96da7546b425510a43946f590 not found: ID does not exist" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.741271 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.741532 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.754298 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.887078 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-logs\") pod \"nova-metadata-0\" (UID: \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\") " pod="openstack/nova-metadata-0" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.887490 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\") " pod="openstack/nova-metadata-0" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.887532 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqckr\" (UniqueName: \"kubernetes.io/projected/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-kube-api-access-dqckr\") pod \"nova-metadata-0\" (UID: \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\") " pod="openstack/nova-metadata-0" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.887647 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-config-data\") pod \"nova-metadata-0\" (UID: \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\") " pod="openstack/nova-metadata-0" Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.887899 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\") " pod="openstack/nova-metadata-0" Nov 28 13:43:24 crc kubenswrapper[4857]: I1128 13:43:24.005622 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\") " pod="openstack/nova-metadata-0" Nov 28 13:43:24 crc kubenswrapper[4857]: I1128 13:43:24.005686 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqckr\" (UniqueName: \"kubernetes.io/projected/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-kube-api-access-dqckr\") pod \"nova-metadata-0\" (UID: \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\") " pod="openstack/nova-metadata-0" Nov 28 13:43:24 crc kubenswrapper[4857]: I1128 13:43:24.005743 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-config-data\") pod \"nova-metadata-0\" (UID: \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\") " pod="openstack/nova-metadata-0" Nov 28 13:43:24 crc kubenswrapper[4857]: I1128 13:43:24.005817 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\") " pod="openstack/nova-metadata-0" Nov 28 13:43:24 crc kubenswrapper[4857]: I1128 13:43:24.005870 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-logs\") pod \"nova-metadata-0\" (UID: \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\") " pod="openstack/nova-metadata-0" Nov 28 13:43:24 crc kubenswrapper[4857]: I1128 13:43:24.006290 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-logs\") pod \"nova-metadata-0\" (UID: \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\") " pod="openstack/nova-metadata-0" Nov 28 
13:43:24 crc kubenswrapper[4857]: I1128 13:43:24.063537 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\") " pod="openstack/nova-metadata-0" Nov 28 13:43:24 crc kubenswrapper[4857]: I1128 13:43:24.065178 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-config-data\") pod \"nova-metadata-0\" (UID: \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\") " pod="openstack/nova-metadata-0" Nov 28 13:43:24 crc kubenswrapper[4857]: I1128 13:43:24.098318 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\") " pod="openstack/nova-metadata-0" Nov 28 13:43:24 crc kubenswrapper[4857]: I1128 13:43:24.104453 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqckr\" (UniqueName: \"kubernetes.io/projected/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-kube-api-access-dqckr\") pod \"nova-metadata-0\" (UID: \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\") " pod="openstack/nova-metadata-0" Nov 28 13:43:24 crc kubenswrapper[4857]: I1128 13:43:24.325099 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b57e1273-601c-4338-90bd-9047ae46b65a" path="/var/lib/kubelet/pods/b57e1273-601c-4338-90bd-9047ae46b65a/volumes" Nov 28 13:43:24 crc kubenswrapper[4857]: I1128 13:43:24.359598 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:43:25 crc kubenswrapper[4857]: I1128 13:43:24.863916 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:43:25 crc kubenswrapper[4857]: W1128 13:43:24.870467 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podea2604b9_e3ca_4145_b8c3_42a9b8e3b286.slice/crio-553e1bee8ec433037849576640aa295afd2274c2d101bf044e81f16bf32c1a61 WatchSource:0}: Error finding container 553e1bee8ec433037849576640aa295afd2274c2d101bf044e81f16bf32c1a61: Status 404 returned error can't find the container with id 553e1bee8ec433037849576640aa295afd2274c2d101bf044e81f16bf32c1a61 Nov 28 13:43:25 crc kubenswrapper[4857]: I1128 13:43:25.735544 4857 generic.go:334] "Generic (PLEG): container finished" podID="b1265be9-7097-4d62-aa44-c64c7bf3df52" containerID="25b990ff85ae73e5ffbc8ae0fd4bcc1c0c6610da4c54dd6bf6d6722b872dbe3a" exitCode=0 Nov 28 13:43:25 crc kubenswrapper[4857]: I1128 13:43:25.736166 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b1265be9-7097-4d62-aa44-c64c7bf3df52","Type":"ContainerDied","Data":"25b990ff85ae73e5ffbc8ae0fd4bcc1c0c6610da4c54dd6bf6d6722b872dbe3a"} Nov 28 13:43:25 crc kubenswrapper[4857]: I1128 13:43:25.755191 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286","Type":"ContainerStarted","Data":"ec5937438716528a8aa131c5d6bf8c9a57f6a24f30318571c52d136b077dfcf7"} Nov 28 13:43:25 crc kubenswrapper[4857]: I1128 13:43:25.755235 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286","Type":"ContainerStarted","Data":"e575a5748441c404e5228a5c2146f98ab1fd6c5ae67eb9523fedd879f306a6a7"} Nov 28 13:43:25 crc kubenswrapper[4857]: I1128 13:43:25.755247 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286","Type":"ContainerStarted","Data":"553e1bee8ec433037849576640aa295afd2274c2d101bf044e81f16bf32c1a61"} Nov 28 13:43:25 crc kubenswrapper[4857]: I1128 13:43:25.796020 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.795998995 podStartE2EDuration="2.795998995s" podCreationTimestamp="2025-11-28 13:43:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:43:25.780346567 +0000 UTC m=+1497.807721744" watchObservedRunningTime="2025-11-28 13:43:25.795998995 +0000 UTC m=+1497.823374162" Nov 28 13:43:25 crc kubenswrapper[4857]: I1128 13:43:25.852744 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.042173 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-config-data\") pod \"b1265be9-7097-4d62-aa44-c64c7bf3df52\" (UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.042223 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-internal-tls-certs\") pod \"b1265be9-7097-4d62-aa44-c64c7bf3df52\" (UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.042261 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1265be9-7097-4d62-aa44-c64c7bf3df52-logs\") pod \"b1265be9-7097-4d62-aa44-c64c7bf3df52\" (UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.042351 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-public-tls-certs\") pod \"b1265be9-7097-4d62-aa44-c64c7bf3df52\" (UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.042376 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2f49f\" (UniqueName: \"kubernetes.io/projected/b1265be9-7097-4d62-aa44-c64c7bf3df52-kube-api-access-2f49f\") pod \"b1265be9-7097-4d62-aa44-c64c7bf3df52\" (UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.042527 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-combined-ca-bundle\") pod \"b1265be9-7097-4d62-aa44-c64c7bf3df52\" (UID: \"b1265be9-7097-4d62-aa44-c64c7bf3df52\") " Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.043202 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1265be9-7097-4d62-aa44-c64c7bf3df52-logs" (OuterVolumeSpecName: "logs") pod "b1265be9-7097-4d62-aa44-c64c7bf3df52" (UID: "b1265be9-7097-4d62-aa44-c64c7bf3df52"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.047381 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1265be9-7097-4d62-aa44-c64c7bf3df52-kube-api-access-2f49f" (OuterVolumeSpecName: "kube-api-access-2f49f") pod "b1265be9-7097-4d62-aa44-c64c7bf3df52" (UID: "b1265be9-7097-4d62-aa44-c64c7bf3df52"). InnerVolumeSpecName "kube-api-access-2f49f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.068996 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-config-data" (OuterVolumeSpecName: "config-data") pod "b1265be9-7097-4d62-aa44-c64c7bf3df52" (UID: "b1265be9-7097-4d62-aa44-c64c7bf3df52"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.082715 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b1265be9-7097-4d62-aa44-c64c7bf3df52" (UID: "b1265be9-7097-4d62-aa44-c64c7bf3df52"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.102701 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "b1265be9-7097-4d62-aa44-c64c7bf3df52" (UID: "b1265be9-7097-4d62-aa44-c64c7bf3df52"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.106846 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "b1265be9-7097-4d62-aa44-c64c7bf3df52" (UID: "b1265be9-7097-4d62-aa44-c64c7bf3df52"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.145691 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.145798 4857 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.145821 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1265be9-7097-4d62-aa44-c64c7bf3df52-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.145842 4857 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.145862 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2f49f\" (UniqueName: \"kubernetes.io/projected/b1265be9-7097-4d62-aa44-c64c7bf3df52-kube-api-access-2f49f\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.145881 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1265be9-7097-4d62-aa44-c64c7bf3df52-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.769719 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b1265be9-7097-4d62-aa44-c64c7bf3df52","Type":"ContainerDied","Data":"6e4fb43cb8f553d9df735cd82300f61b64493b077d387af84bae94c0dad0dc0f"} Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.769790 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.770138 4857 scope.go:117] "RemoveContainer" containerID="25b990ff85ae73e5ffbc8ae0fd4bcc1c0c6610da4c54dd6bf6d6722b872dbe3a" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.796532 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.798817 4857 scope.go:117] "RemoveContainer" containerID="00b8ca7a75dc677dcdb19febab64976b8c725b5cb1eddce840a03a18cc529897" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.809456 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.831044 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 13:43:26 crc kubenswrapper[4857]: E1128 13:43:26.831560 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1265be9-7097-4d62-aa44-c64c7bf3df52" containerName="nova-api-api" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.831582 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1265be9-7097-4d62-aa44-c64c7bf3df52" containerName="nova-api-api" Nov 28 13:43:26 crc kubenswrapper[4857]: E1128 13:43:26.831696 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1265be9-7097-4d62-aa44-c64c7bf3df52" containerName="nova-api-log" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.831713 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1265be9-7097-4d62-aa44-c64c7bf3df52" containerName="nova-api-log" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.832107 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1265be9-7097-4d62-aa44-c64c7bf3df52" containerName="nova-api-log" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.832147 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1265be9-7097-4d62-aa44-c64c7bf3df52" containerName="nova-api-api" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.833600 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.838332 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.838500 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.838826 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.846992 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.864589 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-config-data\") pod \"nova-api-0\" (UID: \"64da16e3-099d-4def-9656-91f40d64672f\") " pod="openstack/nova-api-0" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.864649 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"64da16e3-099d-4def-9656-91f40d64672f\") " pod="openstack/nova-api-0" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.864675 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrdq7\" (UniqueName: \"kubernetes.io/projected/64da16e3-099d-4def-9656-91f40d64672f-kube-api-access-wrdq7\") pod \"nova-api-0\" (UID: \"64da16e3-099d-4def-9656-91f40d64672f\") " pod="openstack/nova-api-0" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.864700 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-internal-tls-certs\") pod \"nova-api-0\" (UID: \"64da16e3-099d-4def-9656-91f40d64672f\") " pod="openstack/nova-api-0" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.864742 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-public-tls-certs\") pod \"nova-api-0\" (UID: \"64da16e3-099d-4def-9656-91f40d64672f\") " pod="openstack/nova-api-0" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.864794 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/64da16e3-099d-4def-9656-91f40d64672f-logs\") pod \"nova-api-0\" (UID: \"64da16e3-099d-4def-9656-91f40d64672f\") " pod="openstack/nova-api-0" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.966945 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-config-data\") pod \"nova-api-0\" (UID: \"64da16e3-099d-4def-9656-91f40d64672f\") " pod="openstack/nova-api-0" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.967037 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-combined-ca-bundle\") pod \"nova-api-0\" (UID: 
\"64da16e3-099d-4def-9656-91f40d64672f\") " pod="openstack/nova-api-0" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.967095 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrdq7\" (UniqueName: \"kubernetes.io/projected/64da16e3-099d-4def-9656-91f40d64672f-kube-api-access-wrdq7\") pod \"nova-api-0\" (UID: \"64da16e3-099d-4def-9656-91f40d64672f\") " pod="openstack/nova-api-0" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.967155 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-internal-tls-certs\") pod \"nova-api-0\" (UID: \"64da16e3-099d-4def-9656-91f40d64672f\") " pod="openstack/nova-api-0" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.967239 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-public-tls-certs\") pod \"nova-api-0\" (UID: \"64da16e3-099d-4def-9656-91f40d64672f\") " pod="openstack/nova-api-0" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.967286 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/64da16e3-099d-4def-9656-91f40d64672f-logs\") pod \"nova-api-0\" (UID: \"64da16e3-099d-4def-9656-91f40d64672f\") " pod="openstack/nova-api-0" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.968003 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/64da16e3-099d-4def-9656-91f40d64672f-logs\") pod \"nova-api-0\" (UID: \"64da16e3-099d-4def-9656-91f40d64672f\") " pod="openstack/nova-api-0" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.972144 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-internal-tls-certs\") pod \"nova-api-0\" (UID: \"64da16e3-099d-4def-9656-91f40d64672f\") " pod="openstack/nova-api-0" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.972411 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"64da16e3-099d-4def-9656-91f40d64672f\") " pod="openstack/nova-api-0" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.973339 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-public-tls-certs\") pod \"nova-api-0\" (UID: \"64da16e3-099d-4def-9656-91f40d64672f\") " pod="openstack/nova-api-0" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.985428 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrdq7\" (UniqueName: \"kubernetes.io/projected/64da16e3-099d-4def-9656-91f40d64672f-kube-api-access-wrdq7\") pod \"nova-api-0\" (UID: \"64da16e3-099d-4def-9656-91f40d64672f\") " pod="openstack/nova-api-0" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.986928 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-config-data\") pod \"nova-api-0\" (UID: \"64da16e3-099d-4def-9656-91f40d64672f\") " pod="openstack/nova-api-0" Nov 
28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.000829 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.164867 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.723211 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.782665 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"64da16e3-099d-4def-9656-91f40d64672f","Type":"ContainerStarted","Data":"15d27efb250638fac86a83ac72b6e6b4f58a2384e37c15d20ca62c7fb2f83e07"} Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.337733 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1265be9-7097-4d62-aa44-c64c7bf3df52" path="/var/lib/kubelet/pods/b1265be9-7097-4d62-aa44-c64c7bf3df52/volumes" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.794425 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"64da16e3-099d-4def-9656-91f40d64672f","Type":"ContainerStarted","Data":"1146e3ec8a4d803ee31e0a88958bb4723468c3f9bc7e9a7d393734acda6d6b4a"} Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.795561 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"64da16e3-099d-4def-9656-91f40d64672f","Type":"ContainerStarted","Data":"ee074130ed95276ff4a950681c7df3344a6e4c3aa86435eb4b8f9e471126f272"} Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.822928 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.822909525 podStartE2EDuration="2.822909525s" podCreationTimestamp="2025-11-28 13:43:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:43:28.814252497 +0000 UTC m=+1500.841627714" watchObservedRunningTime="2025-11-28 13:43:28.822909525 +0000 UTC m=+1500.850284692" Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.359931 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.359997 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 13:43:32 crc kubenswrapper[4857]: I1128 13:43:32.019705 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 13:43:32 crc kubenswrapper[4857]: I1128 13:43:32.061796 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 13:43:32 crc kubenswrapper[4857]: I1128 13:43:32.868659 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 13:43:33 crc kubenswrapper[4857]: I1128 13:43:33.178498 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:43:33 crc kubenswrapper[4857]: I1128 13:43:33.178562 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" 
podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:43:34 crc kubenswrapper[4857]: I1128 13:43:34.360239 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 13:43:34 crc kubenswrapper[4857]: I1128 13:43:34.360624 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 13:43:34 crc kubenswrapper[4857]: I1128 13:43:34.404284 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-db4ks"] Nov 28 13:43:34 crc kubenswrapper[4857]: I1128 13:43:34.407057 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-db4ks" Nov 28 13:43:34 crc kubenswrapper[4857]: I1128 13:43:34.417585 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-db4ks"] Nov 28 13:43:34 crc kubenswrapper[4857]: I1128 13:43:34.791972 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a41e22f1-4643-4ebc-ae43-7c0e9f31836d-catalog-content\") pod \"redhat-operators-db4ks\" (UID: \"a41e22f1-4643-4ebc-ae43-7c0e9f31836d\") " pod="openshift-marketplace/redhat-operators-db4ks" Nov 28 13:43:34 crc kubenswrapper[4857]: I1128 13:43:34.792035 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nsr6f\" (UniqueName: \"kubernetes.io/projected/a41e22f1-4643-4ebc-ae43-7c0e9f31836d-kube-api-access-nsr6f\") pod \"redhat-operators-db4ks\" (UID: \"a41e22f1-4643-4ebc-ae43-7c0e9f31836d\") " pod="openshift-marketplace/redhat-operators-db4ks" Nov 28 13:43:34 crc kubenswrapper[4857]: I1128 13:43:34.792080 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a41e22f1-4643-4ebc-ae43-7c0e9f31836d-utilities\") pod \"redhat-operators-db4ks\" (UID: \"a41e22f1-4643-4ebc-ae43-7c0e9f31836d\") " pod="openshift-marketplace/redhat-operators-db4ks" Nov 28 13:43:34 crc kubenswrapper[4857]: I1128 13:43:34.893833 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a41e22f1-4643-4ebc-ae43-7c0e9f31836d-catalog-content\") pod \"redhat-operators-db4ks\" (UID: \"a41e22f1-4643-4ebc-ae43-7c0e9f31836d\") " pod="openshift-marketplace/redhat-operators-db4ks" Nov 28 13:43:34 crc kubenswrapper[4857]: I1128 13:43:34.894064 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nsr6f\" (UniqueName: \"kubernetes.io/projected/a41e22f1-4643-4ebc-ae43-7c0e9f31836d-kube-api-access-nsr6f\") pod \"redhat-operators-db4ks\" (UID: \"a41e22f1-4643-4ebc-ae43-7c0e9f31836d\") " pod="openshift-marketplace/redhat-operators-db4ks" Nov 28 13:43:34 crc kubenswrapper[4857]: I1128 13:43:34.894189 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a41e22f1-4643-4ebc-ae43-7c0e9f31836d-utilities\") pod \"redhat-operators-db4ks\" (UID: \"a41e22f1-4643-4ebc-ae43-7c0e9f31836d\") " pod="openshift-marketplace/redhat-operators-db4ks" Nov 28 13:43:34 crc kubenswrapper[4857]: I1128 13:43:34.894244 4857 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a41e22f1-4643-4ebc-ae43-7c0e9f31836d-catalog-content\") pod \"redhat-operators-db4ks\" (UID: \"a41e22f1-4643-4ebc-ae43-7c0e9f31836d\") " pod="openshift-marketplace/redhat-operators-db4ks" Nov 28 13:43:34 crc kubenswrapper[4857]: I1128 13:43:34.894619 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a41e22f1-4643-4ebc-ae43-7c0e9f31836d-utilities\") pod \"redhat-operators-db4ks\" (UID: \"a41e22f1-4643-4ebc-ae43-7c0e9f31836d\") " pod="openshift-marketplace/redhat-operators-db4ks" Nov 28 13:43:34 crc kubenswrapper[4857]: I1128 13:43:34.913282 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nsr6f\" (UniqueName: \"kubernetes.io/projected/a41e22f1-4643-4ebc-ae43-7c0e9f31836d-kube-api-access-nsr6f\") pod \"redhat-operators-db4ks\" (UID: \"a41e22f1-4643-4ebc-ae43-7c0e9f31836d\") " pod="openshift-marketplace/redhat-operators-db4ks" Nov 28 13:43:35 crc kubenswrapper[4857]: I1128 13:43:35.038767 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-db4ks" Nov 28 13:43:35 crc kubenswrapper[4857]: I1128 13:43:35.374933 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="ea2604b9-e3ca-4145-b8c3-42a9b8e3b286" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.201:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 13:43:35 crc kubenswrapper[4857]: I1128 13:43:35.374964 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="ea2604b9-e3ca-4145-b8c3-42a9b8e3b286" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.201:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 13:43:35 crc kubenswrapper[4857]: I1128 13:43:35.520530 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-db4ks"] Nov 28 13:43:35 crc kubenswrapper[4857]: I1128 13:43:35.873417 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-db4ks" event={"ID":"a41e22f1-4643-4ebc-ae43-7c0e9f31836d","Type":"ContainerStarted","Data":"a07035d352b16e622aadb233282f9aa23680bc2e5b44da556610ff8bd83e1c99"} Nov 28 13:43:35 crc kubenswrapper[4857]: I1128 13:43:35.873461 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-db4ks" event={"ID":"a41e22f1-4643-4ebc-ae43-7c0e9f31836d","Type":"ContainerStarted","Data":"3626043f5ed62ac74c46b4886cee30b63e41077edef79cffafdbde3b85d8df6e"} Nov 28 13:43:36 crc kubenswrapper[4857]: I1128 13:43:36.883451 4857 generic.go:334] "Generic (PLEG): container finished" podID="a41e22f1-4643-4ebc-ae43-7c0e9f31836d" containerID="a07035d352b16e622aadb233282f9aa23680bc2e5b44da556610ff8bd83e1c99" exitCode=0 Nov 28 13:43:36 crc kubenswrapper[4857]: I1128 13:43:36.883535 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-db4ks" event={"ID":"a41e22f1-4643-4ebc-ae43-7c0e9f31836d","Type":"ContainerDied","Data":"a07035d352b16e622aadb233282f9aa23680bc2e5b44da556610ff8bd83e1c99"} Nov 28 13:43:37 crc kubenswrapper[4857]: I1128 13:43:37.165674 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openstack/nova-api-0" Nov 28 13:43:37 crc kubenswrapper[4857]: I1128 13:43:37.166005 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 13:43:37 crc kubenswrapper[4857]: I1128 13:43:37.894442 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-db4ks" event={"ID":"a41e22f1-4643-4ebc-ae43-7c0e9f31836d","Type":"ContainerStarted","Data":"f74b93ee7b2cb6b3b74e5d7d379693cfb5bc28c732c438affe4653dcedcab1fe"} Nov 28 13:43:38 crc kubenswrapper[4857]: I1128 13:43:38.178001 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="64da16e3-099d-4def-9656-91f40d64672f" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.202:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 13:43:38 crc kubenswrapper[4857]: I1128 13:43:38.178007 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="64da16e3-099d-4def-9656-91f40d64672f" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.202:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 13:43:38 crc kubenswrapper[4857]: I1128 13:43:38.908927 4857 generic.go:334] "Generic (PLEG): container finished" podID="a41e22f1-4643-4ebc-ae43-7c0e9f31836d" containerID="f74b93ee7b2cb6b3b74e5d7d379693cfb5bc28c732c438affe4653dcedcab1fe" exitCode=0 Nov 28 13:43:38 crc kubenswrapper[4857]: I1128 13:43:38.909976 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-db4ks" event={"ID":"a41e22f1-4643-4ebc-ae43-7c0e9f31836d","Type":"ContainerDied","Data":"f74b93ee7b2cb6b3b74e5d7d379693cfb5bc28c732c438affe4653dcedcab1fe"} Nov 28 13:43:39 crc kubenswrapper[4857]: I1128 13:43:39.587096 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 13:43:40 crc kubenswrapper[4857]: I1128 13:43:40.931541 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-db4ks" event={"ID":"a41e22f1-4643-4ebc-ae43-7c0e9f31836d","Type":"ContainerStarted","Data":"4f6ccffc5a220df93bff75c0314e79f475102df60573550749910e0b3504d523"} Nov 28 13:43:40 crc kubenswrapper[4857]: I1128 13:43:40.957315 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-db4ks" podStartSLOduration=2.867654485 podStartE2EDuration="6.957296544s" podCreationTimestamp="2025-11-28 13:43:34 +0000 UTC" firstStartedPulling="2025-11-28 13:43:35.874660521 +0000 UTC m=+1507.902035688" lastFinishedPulling="2025-11-28 13:43:39.96430254 +0000 UTC m=+1511.991677747" observedRunningTime="2025-11-28 13:43:40.949651735 +0000 UTC m=+1512.977026902" watchObservedRunningTime="2025-11-28 13:43:40.957296544 +0000 UTC m=+1512.984671711" Nov 28 13:43:44 crc kubenswrapper[4857]: I1128 13:43:44.367006 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 13:43:44 crc kubenswrapper[4857]: I1128 13:43:44.370386 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 13:43:44 crc kubenswrapper[4857]: I1128 13:43:44.374728 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 13:43:44 crc kubenswrapper[4857]: I1128 13:43:44.992936 4857 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 13:43:45 crc kubenswrapper[4857]: I1128 13:43:45.049245 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-db4ks" Nov 28 13:43:45 crc kubenswrapper[4857]: I1128 13:43:45.049355 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-db4ks" Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.132386 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-db4ks" podUID="a41e22f1-4643-4ebc-ae43-7c0e9f31836d" containerName="registry-server" probeResult="failure" output=< Nov 28 13:43:46 crc kubenswrapper[4857]: timeout: failed to connect service ":50051" within 1s Nov 28 13:43:46 crc kubenswrapper[4857]: > Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.014794 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5rq62"] Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.017589 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5rq62" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.030029 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5rq62"] Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.203672 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjr5f\" (UniqueName: \"kubernetes.io/projected/f01273d3-6c15-4b75-b19e-a1bd1ca86283-kube-api-access-hjr5f\") pod \"redhat-marketplace-5rq62\" (UID: \"f01273d3-6c15-4b75-b19e-a1bd1ca86283\") " pod="openshift-marketplace/redhat-marketplace-5rq62" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.203719 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f01273d3-6c15-4b75-b19e-a1bd1ca86283-utilities\") pod \"redhat-marketplace-5rq62\" (UID: \"f01273d3-6c15-4b75-b19e-a1bd1ca86283\") " pod="openshift-marketplace/redhat-marketplace-5rq62" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.203805 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f01273d3-6c15-4b75-b19e-a1bd1ca86283-catalog-content\") pod \"redhat-marketplace-5rq62\" (UID: \"f01273d3-6c15-4b75-b19e-a1bd1ca86283\") " pod="openshift-marketplace/redhat-marketplace-5rq62" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.209890 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.211323 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.224503 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.245309 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.304971 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjr5f\" (UniqueName: \"kubernetes.io/projected/f01273d3-6c15-4b75-b19e-a1bd1ca86283-kube-api-access-hjr5f\") pod 
\"redhat-marketplace-5rq62\" (UID: \"f01273d3-6c15-4b75-b19e-a1bd1ca86283\") " pod="openshift-marketplace/redhat-marketplace-5rq62" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.305021 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f01273d3-6c15-4b75-b19e-a1bd1ca86283-utilities\") pod \"redhat-marketplace-5rq62\" (UID: \"f01273d3-6c15-4b75-b19e-a1bd1ca86283\") " pod="openshift-marketplace/redhat-marketplace-5rq62" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.305070 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f01273d3-6c15-4b75-b19e-a1bd1ca86283-catalog-content\") pod \"redhat-marketplace-5rq62\" (UID: \"f01273d3-6c15-4b75-b19e-a1bd1ca86283\") " pod="openshift-marketplace/redhat-marketplace-5rq62" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.305643 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f01273d3-6c15-4b75-b19e-a1bd1ca86283-utilities\") pod \"redhat-marketplace-5rq62\" (UID: \"f01273d3-6c15-4b75-b19e-a1bd1ca86283\") " pod="openshift-marketplace/redhat-marketplace-5rq62" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.305658 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f01273d3-6c15-4b75-b19e-a1bd1ca86283-catalog-content\") pod \"redhat-marketplace-5rq62\" (UID: \"f01273d3-6c15-4b75-b19e-a1bd1ca86283\") " pod="openshift-marketplace/redhat-marketplace-5rq62" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.328414 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjr5f\" (UniqueName: \"kubernetes.io/projected/f01273d3-6c15-4b75-b19e-a1bd1ca86283-kube-api-access-hjr5f\") pod \"redhat-marketplace-5rq62\" (UID: \"f01273d3-6c15-4b75-b19e-a1bd1ca86283\") " pod="openshift-marketplace/redhat-marketplace-5rq62" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.517346 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5rq62" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.993906 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5rq62"] Nov 28 13:43:48 crc kubenswrapper[4857]: W1128 13:43:48.000724 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf01273d3_6c15_4b75_b19e_a1bd1ca86283.slice/crio-9acc2a85e37ab29c253f85544411400ef9599d1335fa2f295cddf878e0a4b2e2 WatchSource:0}: Error finding container 9acc2a85e37ab29c253f85544411400ef9599d1335fa2f295cddf878e0a4b2e2: Status 404 returned error can't find the container with id 9acc2a85e37ab29c253f85544411400ef9599d1335fa2f295cddf878e0a4b2e2 Nov 28 13:43:48 crc kubenswrapper[4857]: I1128 13:43:48.230068 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5rq62" event={"ID":"f01273d3-6c15-4b75-b19e-a1bd1ca86283","Type":"ContainerStarted","Data":"9acc2a85e37ab29c253f85544411400ef9599d1335fa2f295cddf878e0a4b2e2"} Nov 28 13:43:48 crc kubenswrapper[4857]: I1128 13:43:48.230280 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 13:43:48 crc kubenswrapper[4857]: I1128 13:43:48.239825 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 13:43:49 crc kubenswrapper[4857]: I1128 13:43:49.250424 4857 generic.go:334] "Generic (PLEG): container finished" podID="f01273d3-6c15-4b75-b19e-a1bd1ca86283" containerID="36aaf5dd33d18ad66f99d6923353e1fba86b839da0ebc8a8f45b3aa71f80cd38" exitCode=0 Nov 28 13:43:49 crc kubenswrapper[4857]: I1128 13:43:49.250517 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5rq62" event={"ID":"f01273d3-6c15-4b75-b19e-a1bd1ca86283","Type":"ContainerDied","Data":"36aaf5dd33d18ad66f99d6923353e1fba86b839da0ebc8a8f45b3aa71f80cd38"} Nov 28 13:43:50 crc kubenswrapper[4857]: I1128 13:43:50.262657 4857 generic.go:334] "Generic (PLEG): container finished" podID="f01273d3-6c15-4b75-b19e-a1bd1ca86283" containerID="e58e384b9f0dc0fea5efa08f06c714f6da314a985617a5ac073df5e19f556088" exitCode=0 Nov 28 13:43:50 crc kubenswrapper[4857]: I1128 13:43:50.262737 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5rq62" event={"ID":"f01273d3-6c15-4b75-b19e-a1bd1ca86283","Type":"ContainerDied","Data":"e58e384b9f0dc0fea5efa08f06c714f6da314a985617a5ac073df5e19f556088"} Nov 28 13:43:51 crc kubenswrapper[4857]: I1128 13:43:51.280182 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5rq62" event={"ID":"f01273d3-6c15-4b75-b19e-a1bd1ca86283","Type":"ContainerStarted","Data":"4713d67a1da1d4cc886d06bddef8243eea9d54b7ac27a8b832c3e32d7368dcda"} Nov 28 13:43:51 crc kubenswrapper[4857]: I1128 13:43:51.305132 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5rq62" podStartSLOduration=3.7904374880000002 podStartE2EDuration="5.305110724s" podCreationTimestamp="2025-11-28 13:43:46 +0000 UTC" firstStartedPulling="2025-11-28 13:43:49.260609722 +0000 UTC m=+1521.287984889" lastFinishedPulling="2025-11-28 13:43:50.775282918 +0000 UTC m=+1522.802658125" observedRunningTime="2025-11-28 13:43:51.298638988 +0000 UTC m=+1523.326014155" watchObservedRunningTime="2025-11-28 13:43:51.305110724 +0000 UTC m=+1523.332485891" Nov 
28 13:43:55 crc kubenswrapper[4857]: I1128 13:43:55.204989 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-db4ks" Nov 28 13:43:55 crc kubenswrapper[4857]: I1128 13:43:55.255228 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-db4ks" Nov 28 13:43:55 crc kubenswrapper[4857]: I1128 13:43:55.441020 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-db4ks"] Nov 28 13:43:56 crc kubenswrapper[4857]: I1128 13:43:56.339683 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-db4ks" podUID="a41e22f1-4643-4ebc-ae43-7c0e9f31836d" containerName="registry-server" containerID="cri-o://4f6ccffc5a220df93bff75c0314e79f475102df60573550749910e0b3504d523" gracePeriod=2 Nov 28 13:43:57 crc kubenswrapper[4857]: I1128 13:43:57.352631 4857 generic.go:334] "Generic (PLEG): container finished" podID="a41e22f1-4643-4ebc-ae43-7c0e9f31836d" containerID="4f6ccffc5a220df93bff75c0314e79f475102df60573550749910e0b3504d523" exitCode=0 Nov 28 13:43:57 crc kubenswrapper[4857]: I1128 13:43:57.352707 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-db4ks" event={"ID":"a41e22f1-4643-4ebc-ae43-7c0e9f31836d","Type":"ContainerDied","Data":"4f6ccffc5a220df93bff75c0314e79f475102df60573550749910e0b3504d523"} Nov 28 13:43:57 crc kubenswrapper[4857]: I1128 13:43:57.454708 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-db4ks" Nov 28 13:43:57 crc kubenswrapper[4857]: I1128 13:43:57.518541 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5rq62" Nov 28 13:43:57 crc kubenswrapper[4857]: I1128 13:43:57.519153 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5rq62" Nov 28 13:43:57 crc kubenswrapper[4857]: I1128 13:43:57.579805 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5rq62" Nov 28 13:43:57 crc kubenswrapper[4857]: I1128 13:43:57.629594 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a41e22f1-4643-4ebc-ae43-7c0e9f31836d-catalog-content\") pod \"a41e22f1-4643-4ebc-ae43-7c0e9f31836d\" (UID: \"a41e22f1-4643-4ebc-ae43-7c0e9f31836d\") " Nov 28 13:43:57 crc kubenswrapper[4857]: I1128 13:43:57.629652 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a41e22f1-4643-4ebc-ae43-7c0e9f31836d-utilities\") pod \"a41e22f1-4643-4ebc-ae43-7c0e9f31836d\" (UID: \"a41e22f1-4643-4ebc-ae43-7c0e9f31836d\") " Nov 28 13:43:57 crc kubenswrapper[4857]: I1128 13:43:57.629746 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nsr6f\" (UniqueName: \"kubernetes.io/projected/a41e22f1-4643-4ebc-ae43-7c0e9f31836d-kube-api-access-nsr6f\") pod \"a41e22f1-4643-4ebc-ae43-7c0e9f31836d\" (UID: \"a41e22f1-4643-4ebc-ae43-7c0e9f31836d\") " Nov 28 13:43:57 crc kubenswrapper[4857]: I1128 13:43:57.631278 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a41e22f1-4643-4ebc-ae43-7c0e9f31836d-utilities" (OuterVolumeSpecName: 
"utilities") pod "a41e22f1-4643-4ebc-ae43-7c0e9f31836d" (UID: "a41e22f1-4643-4ebc-ae43-7c0e9f31836d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:43:57 crc kubenswrapper[4857]: I1128 13:43:57.636615 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a41e22f1-4643-4ebc-ae43-7c0e9f31836d-kube-api-access-nsr6f" (OuterVolumeSpecName: "kube-api-access-nsr6f") pod "a41e22f1-4643-4ebc-ae43-7c0e9f31836d" (UID: "a41e22f1-4643-4ebc-ae43-7c0e9f31836d"). InnerVolumeSpecName "kube-api-access-nsr6f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:43:57 crc kubenswrapper[4857]: I1128 13:43:57.732146 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nsr6f\" (UniqueName: \"kubernetes.io/projected/a41e22f1-4643-4ebc-ae43-7c0e9f31836d-kube-api-access-nsr6f\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:57 crc kubenswrapper[4857]: I1128 13:43:57.732188 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a41e22f1-4643-4ebc-ae43-7c0e9f31836d-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:57 crc kubenswrapper[4857]: I1128 13:43:57.739199 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a41e22f1-4643-4ebc-ae43-7c0e9f31836d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a41e22f1-4643-4ebc-ae43-7c0e9f31836d" (UID: "a41e22f1-4643-4ebc-ae43-7c0e9f31836d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:43:57 crc kubenswrapper[4857]: I1128 13:43:57.835104 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a41e22f1-4643-4ebc-ae43-7c0e9f31836d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:58 crc kubenswrapper[4857]: I1128 13:43:58.390277 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-db4ks" Nov 28 13:43:58 crc kubenswrapper[4857]: I1128 13:43:58.392506 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-db4ks" event={"ID":"a41e22f1-4643-4ebc-ae43-7c0e9f31836d","Type":"ContainerDied","Data":"3626043f5ed62ac74c46b4886cee30b63e41077edef79cffafdbde3b85d8df6e"} Nov 28 13:43:58 crc kubenswrapper[4857]: I1128 13:43:58.392558 4857 scope.go:117] "RemoveContainer" containerID="4f6ccffc5a220df93bff75c0314e79f475102df60573550749910e0b3504d523" Nov 28 13:43:58 crc kubenswrapper[4857]: I1128 13:43:58.428932 4857 scope.go:117] "RemoveContainer" containerID="f74b93ee7b2cb6b3b74e5d7d379693cfb5bc28c732c438affe4653dcedcab1fe" Nov 28 13:43:58 crc kubenswrapper[4857]: I1128 13:43:58.446910 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-db4ks"] Nov 28 13:43:58 crc kubenswrapper[4857]: I1128 13:43:58.452394 4857 scope.go:117] "RemoveContainer" containerID="a07035d352b16e622aadb233282f9aa23680bc2e5b44da556610ff8bd83e1c99" Nov 28 13:43:58 crc kubenswrapper[4857]: I1128 13:43:58.452870 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5rq62" Nov 28 13:43:58 crc kubenswrapper[4857]: I1128 13:43:58.466582 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-db4ks"] Nov 28 13:43:59 crc kubenswrapper[4857]: I1128 13:43:59.268248 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5rq62"] Nov 28 13:44:00 crc kubenswrapper[4857]: I1128 13:44:00.320624 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a41e22f1-4643-4ebc-ae43-7c0e9f31836d" path="/var/lib/kubelet/pods/a41e22f1-4643-4ebc-ae43-7c0e9f31836d/volumes" Nov 28 13:44:00 crc kubenswrapper[4857]: I1128 13:44:00.403408 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-5rq62" podUID="f01273d3-6c15-4b75-b19e-a1bd1ca86283" containerName="registry-server" containerID="cri-o://4713d67a1da1d4cc886d06bddef8243eea9d54b7ac27a8b832c3e32d7368dcda" gracePeriod=2 Nov 28 13:44:00 crc kubenswrapper[4857]: I1128 13:44:00.846975 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5rq62" Nov 28 13:44:00 crc kubenswrapper[4857]: I1128 13:44:00.929921 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f01273d3-6c15-4b75-b19e-a1bd1ca86283-catalog-content\") pod \"f01273d3-6c15-4b75-b19e-a1bd1ca86283\" (UID: \"f01273d3-6c15-4b75-b19e-a1bd1ca86283\") " Nov 28 13:44:00 crc kubenswrapper[4857]: I1128 13:44:00.929962 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f01273d3-6c15-4b75-b19e-a1bd1ca86283-utilities\") pod \"f01273d3-6c15-4b75-b19e-a1bd1ca86283\" (UID: \"f01273d3-6c15-4b75-b19e-a1bd1ca86283\") " Nov 28 13:44:00 crc kubenswrapper[4857]: I1128 13:44:00.930049 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hjr5f\" (UniqueName: \"kubernetes.io/projected/f01273d3-6c15-4b75-b19e-a1bd1ca86283-kube-api-access-hjr5f\") pod \"f01273d3-6c15-4b75-b19e-a1bd1ca86283\" (UID: \"f01273d3-6c15-4b75-b19e-a1bd1ca86283\") " Nov 28 13:44:00 crc kubenswrapper[4857]: I1128 13:44:00.931176 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f01273d3-6c15-4b75-b19e-a1bd1ca86283-utilities" (OuterVolumeSpecName: "utilities") pod "f01273d3-6c15-4b75-b19e-a1bd1ca86283" (UID: "f01273d3-6c15-4b75-b19e-a1bd1ca86283"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:00 crc kubenswrapper[4857]: I1128 13:44:00.935284 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f01273d3-6c15-4b75-b19e-a1bd1ca86283-kube-api-access-hjr5f" (OuterVolumeSpecName: "kube-api-access-hjr5f") pod "f01273d3-6c15-4b75-b19e-a1bd1ca86283" (UID: "f01273d3-6c15-4b75-b19e-a1bd1ca86283"). InnerVolumeSpecName "kube-api-access-hjr5f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:00 crc kubenswrapper[4857]: I1128 13:44:00.948024 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f01273d3-6c15-4b75-b19e-a1bd1ca86283-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f01273d3-6c15-4b75-b19e-a1bd1ca86283" (UID: "f01273d3-6c15-4b75-b19e-a1bd1ca86283"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.034845 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hjr5f\" (UniqueName: \"kubernetes.io/projected/f01273d3-6c15-4b75-b19e-a1bd1ca86283-kube-api-access-hjr5f\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.034921 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f01273d3-6c15-4b75-b19e-a1bd1ca86283-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.034948 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f01273d3-6c15-4b75-b19e-a1bd1ca86283-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.415484 4857 generic.go:334] "Generic (PLEG): container finished" podID="f01273d3-6c15-4b75-b19e-a1bd1ca86283" containerID="4713d67a1da1d4cc886d06bddef8243eea9d54b7ac27a8b832c3e32d7368dcda" exitCode=0 Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.415533 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5rq62" event={"ID":"f01273d3-6c15-4b75-b19e-a1bd1ca86283","Type":"ContainerDied","Data":"4713d67a1da1d4cc886d06bddef8243eea9d54b7ac27a8b832c3e32d7368dcda"} Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.415545 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5rq62" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.415568 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5rq62" event={"ID":"f01273d3-6c15-4b75-b19e-a1bd1ca86283","Type":"ContainerDied","Data":"9acc2a85e37ab29c253f85544411400ef9599d1335fa2f295cddf878e0a4b2e2"} Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.415595 4857 scope.go:117] "RemoveContainer" containerID="4713d67a1da1d4cc886d06bddef8243eea9d54b7ac27a8b832c3e32d7368dcda" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.477564 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5rq62"] Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.480089 4857 scope.go:117] "RemoveContainer" containerID="e58e384b9f0dc0fea5efa08f06c714f6da314a985617a5ac073df5e19f556088" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.487012 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-5rq62"] Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.504551 4857 scope.go:117] "RemoveContainer" containerID="36aaf5dd33d18ad66f99d6923353e1fba86b839da0ebc8a8f45b3aa71f80cd38" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.552924 4857 scope.go:117] "RemoveContainer" containerID="4713d67a1da1d4cc886d06bddef8243eea9d54b7ac27a8b832c3e32d7368dcda" Nov 28 13:44:01 crc kubenswrapper[4857]: E1128 13:44:01.553569 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4713d67a1da1d4cc886d06bddef8243eea9d54b7ac27a8b832c3e32d7368dcda\": container with ID starting with 4713d67a1da1d4cc886d06bddef8243eea9d54b7ac27a8b832c3e32d7368dcda not found: ID does not exist" containerID="4713d67a1da1d4cc886d06bddef8243eea9d54b7ac27a8b832c3e32d7368dcda" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.553622 4857 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4713d67a1da1d4cc886d06bddef8243eea9d54b7ac27a8b832c3e32d7368dcda"} err="failed to get container status \"4713d67a1da1d4cc886d06bddef8243eea9d54b7ac27a8b832c3e32d7368dcda\": rpc error: code = NotFound desc = could not find container \"4713d67a1da1d4cc886d06bddef8243eea9d54b7ac27a8b832c3e32d7368dcda\": container with ID starting with 4713d67a1da1d4cc886d06bddef8243eea9d54b7ac27a8b832c3e32d7368dcda not found: ID does not exist" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.553664 4857 scope.go:117] "RemoveContainer" containerID="e58e384b9f0dc0fea5efa08f06c714f6da314a985617a5ac073df5e19f556088" Nov 28 13:44:01 crc kubenswrapper[4857]: E1128 13:44:01.553985 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e58e384b9f0dc0fea5efa08f06c714f6da314a985617a5ac073df5e19f556088\": container with ID starting with e58e384b9f0dc0fea5efa08f06c714f6da314a985617a5ac073df5e19f556088 not found: ID does not exist" containerID="e58e384b9f0dc0fea5efa08f06c714f6da314a985617a5ac073df5e19f556088" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.554014 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e58e384b9f0dc0fea5efa08f06c714f6da314a985617a5ac073df5e19f556088"} err="failed to get container status \"e58e384b9f0dc0fea5efa08f06c714f6da314a985617a5ac073df5e19f556088\": rpc error: code = NotFound desc = could not find container \"e58e384b9f0dc0fea5efa08f06c714f6da314a985617a5ac073df5e19f556088\": container with ID starting with e58e384b9f0dc0fea5efa08f06c714f6da314a985617a5ac073df5e19f556088 not found: ID does not exist" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.554035 4857 scope.go:117] "RemoveContainer" containerID="36aaf5dd33d18ad66f99d6923353e1fba86b839da0ebc8a8f45b3aa71f80cd38" Nov 28 13:44:01 crc kubenswrapper[4857]: E1128 13:44:01.554357 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36aaf5dd33d18ad66f99d6923353e1fba86b839da0ebc8a8f45b3aa71f80cd38\": container with ID starting with 36aaf5dd33d18ad66f99d6923353e1fba86b839da0ebc8a8f45b3aa71f80cd38 not found: ID does not exist" containerID="36aaf5dd33d18ad66f99d6923353e1fba86b839da0ebc8a8f45b3aa71f80cd38" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.554426 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36aaf5dd33d18ad66f99d6923353e1fba86b839da0ebc8a8f45b3aa71f80cd38"} err="failed to get container status \"36aaf5dd33d18ad66f99d6923353e1fba86b839da0ebc8a8f45b3aa71f80cd38\": rpc error: code = NotFound desc = could not find container \"36aaf5dd33d18ad66f99d6923353e1fba86b839da0ebc8a8f45b3aa71f80cd38\": container with ID starting with 36aaf5dd33d18ad66f99d6923353e1fba86b839da0ebc8a8f45b3aa71f80cd38 not found: ID does not exist" Nov 28 13:44:02 crc kubenswrapper[4857]: I1128 13:44:02.336974 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f01273d3-6c15-4b75-b19e-a1bd1ca86283" path="/var/lib/kubelet/pods/f01273d3-6c15-4b75-b19e-a1bd1ca86283/volumes" Nov 28 13:44:03 crc kubenswrapper[4857]: I1128 13:44:03.177679 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
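
The RemoveContainer / "DeleteContainer returned error" exchange above is benign: the container is already gone, so the runtime answers NotFound and the deletion is effectively idempotent. A minimal sketch of that pattern, assuming a hypothetical removeFn standing in for the CRI RemoveContainer call:

    package main

    import (
        "errors"
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // removeContainer treats a NotFound from the runtime as success, so a
    // delete retried after the container is already gone stays idempotent,
    // matching the "could not find container ... NotFound" entries above.
    func removeContainer(id string, removeFn func(string) error) error {
        err := removeFn(id)
        if err == nil || status.Code(err) == codes.NotFound {
            return nil // already gone: nothing left to do
        }
        return fmt.Errorf("remove %s: %w", id, err)
    }

    func main() {
        gone := func(string) error { return status.Error(codes.NotFound, "no such container") }
        fmt.Println(removeContainer("4713d67a", gone)) // <nil>
        bad := func(string) error { return errors.New("runtime unavailable") }
        fmt.Println(removeContainer("4713d67a", bad)) // surfaces the error
    }
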
Nov 28 13:44:03 crc kubenswrapper[4857]: I1128 13:44:03.177789 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.339207 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"]
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.339879 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="07621208-d831-4470-908c-76084c830753" containerName="openstackclient" containerID="cri-o://f53ded071ea01c8120bc4be89662f272bd544a870cac413afe982b426f8f618a" gracePeriod=2
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.384764 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"]
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.586782 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glanceeef9-account-delete-chvxh"]
Nov 28 13:44:06 crc kubenswrapper[4857]: E1128 13:44:06.587198 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f01273d3-6c15-4b75-b19e-a1bd1ca86283" containerName="extract-utilities"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.587217 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f01273d3-6c15-4b75-b19e-a1bd1ca86283" containerName="extract-utilities"
Nov 28 13:44:06 crc kubenswrapper[4857]: E1128 13:44:06.587230 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a41e22f1-4643-4ebc-ae43-7c0e9f31836d" containerName="extract-content"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.587237 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a41e22f1-4643-4ebc-ae43-7c0e9f31836d" containerName="extract-content"
Nov 28 13:44:06 crc kubenswrapper[4857]: E1128 13:44:06.587250 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a41e22f1-4643-4ebc-ae43-7c0e9f31836d" containerName="registry-server"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.587257 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a41e22f1-4643-4ebc-ae43-7c0e9f31836d" containerName="registry-server"
Nov 28 13:44:06 crc kubenswrapper[4857]: E1128 13:44:06.587265 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f01273d3-6c15-4b75-b19e-a1bd1ca86283" containerName="extract-content"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.587272 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f01273d3-6c15-4b75-b19e-a1bd1ca86283" containerName="extract-content"
Nov 28 13:44:06 crc kubenswrapper[4857]: E1128 13:44:06.587280 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a41e22f1-4643-4ebc-ae43-7c0e9f31836d" containerName="extract-utilities"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.587286 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a41e22f1-4643-4ebc-ae43-7c0e9f31836d" containerName="extract-utilities"
Nov 28 13:44:06 crc kubenswrapper[4857]: E1128 13:44:06.587299 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f01273d3-6c15-4b75-b19e-a1bd1ca86283" containerName="registry-server"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.587306 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f01273d3-6c15-4b75-b19e-a1bd1ca86283" containerName="registry-server"
Nov 28 13:44:06 crc kubenswrapper[4857]: E1128 13:44:06.587324 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07621208-d831-4470-908c-76084c830753" containerName="openstackclient"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.587331 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="07621208-d831-4470-908c-76084c830753" containerName="openstackclient"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.587541 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="07621208-d831-4470-908c-76084c830753" containerName="openstackclient"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.587558 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a41e22f1-4643-4ebc-ae43-7c0e9f31836d" containerName="registry-server"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.587573 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f01273d3-6c15-4b75-b19e-a1bd1ca86283" containerName="registry-server"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.588252 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glanceeef9-account-delete-chvxh"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.620042 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glanceeef9-account-delete-chvxh"]
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.661208 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"]
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.661464 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="076d849e-fd88-4add-a5f9-e45a1983a606" containerName="ovn-northd" containerID="cri-o://d8bcc929d2d6bab5c64bef5dbfc2e3fbdfbb52f64ab6c01fd92f824932851397" gracePeriod=30
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.661907 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="076d849e-fd88-4add-a5f9-e45a1983a606" containerName="openstack-network-exporter" containerID="cri-o://0d984f57d2f9c989b335dc40eddb0295b7c07a2ff3153367f9c77e845c49ab2d" gracePeriod=30
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.679107 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxg8t\" (UniqueName: \"kubernetes.io/projected/bf861df0-ad6e-4a39-9932-395afa59e76d-kube-api-access-fxg8t\") pod \"glanceeef9-account-delete-chvxh\" (UID: \"bf861df0-ad6e-4a39-9932-395afa59e76d\") " pod="openstack/glanceeef9-account-delete-chvxh"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.679206 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf861df0-ad6e-4a39-9932-395afa59e76d-operator-scripts\") pod \"glanceeef9-account-delete-chvxh\" (UID: \"bf861df0-ad6e-4a39-9932-395afa59e76d\") " pod="openstack/glanceeef9-account-delete-chvxh"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.780030 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placementcdcd-account-delete-h5qc4"]
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.781238 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placementcdcd-account-delete-h5qc4"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.787884 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxg8t\" (UniqueName: \"kubernetes.io/projected/bf861df0-ad6e-4a39-9932-395afa59e76d-kube-api-access-fxg8t\") pod \"glanceeef9-account-delete-chvxh\" (UID: \"bf861df0-ad6e-4a39-9932-395afa59e76d\") " pod="openstack/glanceeef9-account-delete-chvxh"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.787955 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf861df0-ad6e-4a39-9932-395afa59e76d-operator-scripts\") pod \"glanceeef9-account-delete-chvxh\" (UID: \"bf861df0-ad6e-4a39-9932-395afa59e76d\") " pod="openstack/glanceeef9-account-delete-chvxh"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.788883 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf861df0-ad6e-4a39-9932-395afa59e76d-operator-scripts\") pod \"glanceeef9-account-delete-chvxh\" (UID: \"bf861df0-ad6e-4a39-9932-395afa59e76d\") " pod="openstack/glanceeef9-account-delete-chvxh"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.806738 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placementcdcd-account-delete-h5qc4"]
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.817019 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxg8t\" (UniqueName: \"kubernetes.io/projected/bf861df0-ad6e-4a39-9932-395afa59e76d-kube-api-access-fxg8t\") pod \"glanceeef9-account-delete-chvxh\" (UID: \"bf861df0-ad6e-4a39-9932-395afa59e76d\") " pod="openstack/glanceeef9-account-delete-chvxh"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.889540 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqqr9\" (UniqueName: \"kubernetes.io/projected/e1ac27d1-5ad2-40ed-af2b-18668e48ead3-kube-api-access-mqqr9\") pod \"placementcdcd-account-delete-h5qc4\" (UID: \"e1ac27d1-5ad2-40ed-af2b-18668e48ead3\") " pod="openstack/placementcdcd-account-delete-h5qc4"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.889822 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1ac27d1-5ad2-40ed-af2b-18668e48ead3-operator-scripts\") pod \"placementcdcd-account-delete-h5qc4\" (UID: \"e1ac27d1-5ad2-40ed-af2b-18668e48ead3\") " pod="openstack/placementcdcd-account-delete-h5qc4"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.912166 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glanceeef9-account-delete-chvxh"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.912716 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.923967 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron5867-account-delete-58twd"]
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.925221 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron5867-account-delete-58twd"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.955811 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron5867-account-delete-58twd"]
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.991726 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1ac27d1-5ad2-40ed-af2b-18668e48ead3-operator-scripts\") pod \"placementcdcd-account-delete-h5qc4\" (UID: \"e1ac27d1-5ad2-40ed-af2b-18668e48ead3\") " pod="openstack/placementcdcd-account-delete-h5qc4"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.992158 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqqr9\" (UniqueName: \"kubernetes.io/projected/e1ac27d1-5ad2-40ed-af2b-18668e48ead3-kube-api-access-mqqr9\") pod \"placementcdcd-account-delete-h5qc4\" (UID: \"e1ac27d1-5ad2-40ed-af2b-18668e48ead3\") " pod="openstack/placementcdcd-account-delete-h5qc4"
Nov 28 13:44:06 crc kubenswrapper[4857]: I1128 13:44:06.999543 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1ac27d1-5ad2-40ed-af2b-18668e48ead3-operator-scripts\") pod \"placementcdcd-account-delete-h5qc4\" (UID: \"e1ac27d1-5ad2-40ed-af2b-18668e48ead3\") " pod="openstack/placementcdcd-account-delete-h5qc4"
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.028980 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.029634 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="6133e02f-8ece-4b6b-ac4a-c3871e017c1e" containerName="openstack-network-exporter" containerID="cri-o://1412a3b0dbe8994e239691c6f96324d04036bd95c69256db95ab0b15e2c14255" gracePeriod=300
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.069243 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqqr9\" (UniqueName: \"kubernetes.io/projected/e1ac27d1-5ad2-40ed-af2b-18668e48ead3-kube-api-access-mqqr9\") pod \"placementcdcd-account-delete-h5qc4\" (UID: \"e1ac27d1-5ad2-40ed-af2b-18668e48ead3\") " pod="openstack/placementcdcd-account-delete-h5qc4"
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.093667 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4wx8\" (UniqueName: \"kubernetes.io/projected/89adcb9a-b993-4e60-ae3b-413bed35ae0d-kube-api-access-x4wx8\") pod \"neutron5867-account-delete-58twd\" (UID: \"89adcb9a-b993-4e60-ae3b-413bed35ae0d\") " pod="openstack/neutron5867-account-delete-58twd"
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.093814 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/89adcb9a-b993-4e60-ae3b-413bed35ae0d-operator-scripts\") pod \"neutron5867-account-delete-58twd\" (UID: \"89adcb9a-b993-4e60-ae3b-413bed35ae0d\") " pod="openstack/neutron5867-account-delete-58twd"
Nov 28 13:44:07 crc kubenswrapper[4857]: E1128 13:44:07.095125 4857 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found
Nov 28 13:44:07 crc kubenswrapper[4857]: E1128 13:44:07.095173 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-config-data podName:71cc1f00-1a63-428e-8f12-2136ab077860 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:07.595156979 +0000 UTC m=+1539.622532146 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-config-data") pod "rabbitmq-cell1-server-0" (UID: "71cc1f00-1a63-428e-8f12-2136ab077860") : configmap "rabbitmq-cell1-config-data" not found
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.098883 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican7dd8-account-delete-jg2j5"]
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.110343 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican7dd8-account-delete-jg2j5"
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.137102 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-dwjz2"]
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.181099 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-dwjz2"]
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.196765 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/89adcb9a-b993-4e60-ae3b-413bed35ae0d-operator-scripts\") pod \"neutron5867-account-delete-58twd\" (UID: \"89adcb9a-b993-4e60-ae3b-413bed35ae0d\") " pod="openstack/neutron5867-account-delete-58twd"
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.196906 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4wx8\" (UniqueName: \"kubernetes.io/projected/89adcb9a-b993-4e60-ae3b-413bed35ae0d-kube-api-access-x4wx8\") pod \"neutron5867-account-delete-58twd\" (UID: \"89adcb9a-b993-4e60-ae3b-413bed35ae0d\") " pod="openstack/neutron5867-account-delete-58twd"
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.197661 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/89adcb9a-b993-4e60-ae3b-413bed35ae0d-operator-scripts\") pod \"neutron5867-account-delete-58twd\" (UID: \"89adcb9a-b993-4e60-ae3b-413bed35ae0d\") " pod="openstack/neutron5867-account-delete-58twd"
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.221962 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican7dd8-account-delete-jg2j5"]
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.246536 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4wx8\" (UniqueName: \"kubernetes.io/projected/89adcb9a-b993-4e60-ae3b-413bed35ae0d-kube-api-access-x4wx8\") pod \"neutron5867-account-delete-58twd\" (UID: \"89adcb9a-b993-4e60-ae3b-413bed35ae0d\") " pod="openstack/neutron5867-account-delete-58twd"
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.246956 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-qrf4k"]
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.276236 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-qrf4k"]
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.286114 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placementcdcd-account-delete-h5qc4"
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.299876 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dkr6\" (UniqueName: \"kubernetes.io/projected/d8c0e041-9c74-4a06-a966-833e919e745a-kube-api-access-6dkr6\") pod \"barbican7dd8-account-delete-jg2j5\" (UID: \"d8c0e041-9c74-4a06-a966-833e919e745a\") " pod="openstack/barbican7dd8-account-delete-jg2j5"
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.300140 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8c0e041-9c74-4a06-a966-833e919e745a-operator-scripts\") pod \"barbican7dd8-account-delete-jg2j5\" (UID: \"d8c0e041-9c74-4a06-a966-833e919e745a\") " pod="openstack/barbican7dd8-account-delete-jg2j5"
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.340538 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinderd8b3-account-delete-lxwj8"]
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.341811 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinderd8b3-account-delete-lxwj8"
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.360860 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-46vxl"]
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.400869 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.402188 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dkr6\" (UniqueName: \"kubernetes.io/projected/d8c0e041-9c74-4a06-a966-833e919e745a-kube-api-access-6dkr6\") pod \"barbican7dd8-account-delete-jg2j5\" (UID: \"d8c0e041-9c74-4a06-a966-833e919e745a\") " pod="openstack/barbican7dd8-account-delete-jg2j5"
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.402336 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8c0e041-9c74-4a06-a966-833e919e745a-operator-scripts\") pod \"barbican7dd8-account-delete-jg2j5\" (UID: \"d8c0e041-9c74-4a06-a966-833e919e745a\") " pod="openstack/barbican7dd8-account-delete-jg2j5"
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.403121 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8c0e041-9c74-4a06-a966-833e919e745a-operator-scripts\") pod \"barbican7dd8-account-delete-jg2j5\" (UID: \"d8c0e041-9c74-4a06-a966-833e919e745a\") " pod="openstack/barbican7dd8-account-delete-jg2j5"
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.415601 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinderd8b3-account-delete-lxwj8"]
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.426254 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-46vxl"]
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.440021 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-sp8xb"]
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.442501 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-sp8xb"]
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.443198 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron5867-account-delete-58twd"
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.456806 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-g9jf5"]
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.491804 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dkr6\" (UniqueName: \"kubernetes.io/projected/d8c0e041-9c74-4a06-a966-833e919e745a-kube-api-access-6dkr6\") pod \"barbican7dd8-account-delete-jg2j5\" (UID: \"d8c0e041-9c74-4a06-a966-833e919e745a\") " pod="openstack/barbican7dd8-account-delete-jg2j5"
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.496481 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-g9jf5"]
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.504653 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="6133e02f-8ece-4b6b-ac4a-c3871e017c1e" containerName="ovsdbserver-nb" containerID="cri-o://7f7bc0064f521471cf62c8788f876e9c2ad9aae9c8e92b24025dd3c24bcd9aaf" gracePeriod=300
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.505281 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-operator-scripts\") pod \"cinderd8b3-account-delete-lxwj8\" (UID: \"24a3dca4-a3d0-479d-9be8-fb8c16f97a77\") " pod="openstack/cinderd8b3-account-delete-lxwj8"
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.505399 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9k2h8\" (UniqueName: \"kubernetes.io/projected/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-kube-api-access-9k2h8\") pod \"cinderd8b3-account-delete-lxwj8\" (UID: \"24a3dca4-a3d0-479d-9be8-fb8c16f97a77\") " pod="openstack/cinderd8b3-account-delete-lxwj8"
Nov 28 13:44:07 crc kubenswrapper[4857]: E1128 13:44:07.506931 4857 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found
Nov 28 13:44:07 crc kubenswrapper[4857]: E1128 13:44:07.507010 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-config-data podName:cfbd0457-d459-4bf2-bdaf-8b61db5cce65 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:08.006991577 +0000 UTC m=+1540.034366744 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-config-data") pod "rabbitmq-server-0" (UID: "cfbd0457-d459-4bf2-bdaf-8b61db5cce65") : configmap "rabbitmq-config-data" not found
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.620267 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-ph2cf"]
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.632928 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9k2h8\" (UniqueName: \"kubernetes.io/projected/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-kube-api-access-9k2h8\") pod \"cinderd8b3-account-delete-lxwj8\" (UID: \"24a3dca4-a3d0-479d-9be8-fb8c16f97a77\") " pod="openstack/cinderd8b3-account-delete-lxwj8"
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.633370 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-operator-scripts\") pod \"cinderd8b3-account-delete-lxwj8\" (UID: \"24a3dca4-a3d0-479d-9be8-fb8c16f97a77\") " pod="openstack/cinderd8b3-account-delete-lxwj8"
Nov 28 13:44:07 crc kubenswrapper[4857]: E1128 13:44:07.633658 4857 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found
Nov 28 13:44:07 crc kubenswrapper[4857]: E1128 13:44:07.633721 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-config-data podName:71cc1f00-1a63-428e-8f12-2136ab077860 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:08.633698724 +0000 UTC m=+1540.661073891 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-config-data") pod "rabbitmq-cell1-server-0" (UID: "71cc1f00-1a63-428e-8f12-2136ab077860") : configmap "rabbitmq-cell1-config-data" not found
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.634896 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-operator-scripts\") pod \"cinderd8b3-account-delete-lxwj8\" (UID: \"24a3dca4-a3d0-479d-9be8-fb8c16f97a77\") " pod="openstack/cinderd8b3-account-delete-lxwj8"
Nov 28 13:44:07 crc kubenswrapper[4857]: E1128 13:44:07.682126 4857 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.227:39074->38.102.83.227:40213: write tcp 38.102.83.227:39074->38.102.83.227:40213: write: broken pipe
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.704567 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9k2h8\" (UniqueName: \"kubernetes.io/projected/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-kube-api-access-9k2h8\") pod \"cinderd8b3-account-delete-lxwj8\" (UID: \"24a3dca4-a3d0-479d-9be8-fb8c16f97a77\") " pod="openstack/cinderd8b3-account-delete-lxwj8"
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.747815 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-t99ql"]
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.757936 4857 generic.go:334] "Generic (PLEG): container finished" podID="076d849e-fd88-4add-a5f9-e45a1983a606" containerID="0d984f57d2f9c989b335dc40eddb0295b7c07a2ff3153367f9c77e845c49ab2d" exitCode=2
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.758045 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"076d849e-fd88-4add-a5f9-e45a1983a606","Type":"ContainerDied","Data":"0d984f57d2f9c989b335dc40eddb0295b7c07a2ff3153367f9c77e845c49ab2d"}
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.797316 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_6133e02f-8ece-4b6b-ac4a-c3871e017c1e/ovsdbserver-nb/0.log"
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.797367 4857 generic.go:334] "Generic (PLEG): container finished" podID="6133e02f-8ece-4b6b-ac4a-c3871e017c1e" containerID="1412a3b0dbe8994e239691c6f96324d04036bd95c69256db95ab0b15e2c14255" exitCode=2
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.797397 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"6133e02f-8ece-4b6b-ac4a-c3871e017c1e","Type":"ContainerDied","Data":"1412a3b0dbe8994e239691c6f96324d04036bd95c69256db95ab0b15e2c14255"}
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.822618 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-k7b77"]
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.822951 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-metrics-k7b77" podUID="8bb8cc13-eda7-4c41-9878-77ddabd55f4b" containerName="openstack-network-exporter" containerID="cri-o://c9dfde4fc40c233928885d1fb977721a4b1687b44081115fcf5a00db9ce6907f" gracePeriod=30
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.881984 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican7dd8-account-delete-jg2j5"
Need to start a new one" pod="openstack/barbican7dd8-account-delete-jg2j5" Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.902260 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novaapi7cc9-account-delete-qjqg5"] Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.903573 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novaapi7cc9-account-delete-qjqg5" Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.957108 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinderd8b3-account-delete-lxwj8" Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.968868 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novaapi7cc9-account-delete-qjqg5"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.012983 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-h799k"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.013298 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k" podUID="0cb677df-7237-4b82-8806-d7abedfad40c" containerName="dnsmasq-dns" containerID="cri-o://54ebf8119e8b85e98e03f36c99c69b277451e9d96ff0dda0b092e460eb535292" gracePeriod=10 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.080097 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.080533 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="3a952329-a8d9-432d-ac5b-d88b7e2ede6b" containerName="glance-httpd" containerID="cri-o://d8e862b58223c1ae15f7828a07974724e3a49c1477b31569a8dbea821c8bc09e" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.080405 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="3a952329-a8d9-432d-ac5b-d88b7e2ede6b" containerName="glance-log" containerID="cri-o://990eadd5834f267197096c5bc4a36f6e0524a5b8386ca4956f7f56c8c34c8ce5" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.102185 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f75b361-6a38-42a4-971c-1b3a68a3f10f-operator-scripts\") pod \"novaapi7cc9-account-delete-qjqg5\" (UID: \"6f75b361-6a38-42a4-971c-1b3a68a3f10f\") " pod="openstack/novaapi7cc9-account-delete-qjqg5" Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.102237 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6qfwm\" (UniqueName: \"kubernetes.io/projected/6f75b361-6a38-42a4-971c-1b3a68a3f10f-kube-api-access-6qfwm\") pod \"novaapi7cc9-account-delete-qjqg5\" (UID: \"6f75b361-6a38-42a4-971c-1b3a68a3f10f\") " pod="openstack/novaapi7cc9-account-delete-qjqg5" Nov 28 13:44:08 crc kubenswrapper[4857]: E1128 13:44:08.102420 4857 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 13:44:08 crc kubenswrapper[4857]: E1128 13:44:08.102468 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-config-data podName:cfbd0457-d459-4bf2-bdaf-8b61db5cce65 nodeName:}" failed. 
No retries permitted until 2025-11-28 13:44:09.102452581 +0000 UTC m=+1541.129827748 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-config-data") pod "rabbitmq-server-0" (UID: "cfbd0457-d459-4bf2-bdaf-8b61db5cce65") : configmap "rabbitmq-config-data" not found Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.153496 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.168864 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="e2fec95b-4e40-4761-9d14-6abfeb78d9c0" containerName="openstack-network-exporter" containerID="cri-o://8dea0ac70575cce5895b3d523d663e080996fff826c13ed3d4f241bb7bf27649" gracePeriod=300 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.209211 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-749fd8cf96-rbd6r"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.209672 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-749fd8cf96-rbd6r" podUID="946c0669-4c99-46b7-a9ff-437042383642" containerName="placement-log" containerID="cri-o://9f2936fe928f6000c1df2fe80515f9fd71cc2a258636283c70afbe2ab56dcf0b" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.209820 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-749fd8cf96-rbd6r" podUID="946c0669-4c99-46b7-a9ff-437042383642" containerName="placement-api" containerID="cri-o://4bdd0ee5b2dc8d0eba75e5970152f8cfe9df74f09930b295ed3cf6ddb62ac999" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.215402 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f75b361-6a38-42a4-971c-1b3a68a3f10f-operator-scripts\") pod \"novaapi7cc9-account-delete-qjqg5\" (UID: \"6f75b361-6a38-42a4-971c-1b3a68a3f10f\") " pod="openstack/novaapi7cc9-account-delete-qjqg5" Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.215723 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6qfwm\" (UniqueName: \"kubernetes.io/projected/6f75b361-6a38-42a4-971c-1b3a68a3f10f-kube-api-access-6qfwm\") pod \"novaapi7cc9-account-delete-qjqg5\" (UID: \"6f75b361-6a38-42a4-971c-1b3a68a3f10f\") " pod="openstack/novaapi7cc9-account-delete-qjqg5" Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.221065 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f75b361-6a38-42a4-971c-1b3a68a3f10f-operator-scripts\") pod \"novaapi7cc9-account-delete-qjqg5\" (UID: \"6f75b361-6a38-42a4-971c-1b3a68a3f10f\") " pod="openstack/novaapi7cc9-account-delete-qjqg5" Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.260654 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-xhgkc"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.269390 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6qfwm\" (UniqueName: \"kubernetes.io/projected/6f75b361-6a38-42a4-971c-1b3a68a3f10f-kube-api-access-6qfwm\") pod \"novaapi7cc9-account-delete-qjqg5\" (UID: \"6f75b361-6a38-42a4-971c-1b3a68a3f10f\") " pod="openstack/novaapi7cc9-account-delete-qjqg5" Nov 28 13:44:08 
crc kubenswrapper[4857]: I1128 13:44:08.287665 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novacell10c56-account-delete-6l7tk"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.289207 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell10c56-account-delete-6l7tk" Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.341268 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftjvm\" (UniqueName: \"kubernetes.io/projected/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-kube-api-access-ftjvm\") pod \"novacell10c56-account-delete-6l7tk\" (UID: \"a81fb5f5-33d2-4da6-86a6-d2f248a3364f\") " pod="openstack/novacell10c56-account-delete-6l7tk" Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.341348 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-operator-scripts\") pod \"novacell10c56-account-delete-6l7tk\" (UID: \"a81fb5f5-33d2-4da6-86a6-d2f248a3364f\") " pod="openstack/novacell10c56-account-delete-6l7tk" Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.400361 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="574e8323-bfa6-4c1d-9a87-53f09671c900" path="/var/lib/kubelet/pods/574e8323-bfa6-4c1d-9a87-53f09671c900/volumes" Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.401860 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="779e7a51-657e-47ae-a068-3cd339cd9bb1" path="/var/lib/kubelet/pods/779e7a51-657e-47ae-a068-3cd339cd9bb1/volumes" Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.402477 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d" path="/var/lib/kubelet/pods/a80ad59d-c2f8-4bc8-a33d-6a4a6e161d3d/volumes" Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.403553 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7eee1cb-c5d6-45e4-a007-0d29935cd83a" path="/var/lib/kubelet/pods/d7eee1cb-c5d6-45e4-a007-0d29935cd83a/volumes" Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.404157 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e603bbb8-24d6-43aa-bd0e-0039d8abc8e2" path="/var/lib/kubelet/pods/e603bbb8-24d6-43aa-bd0e-0039d8abc8e2/volumes" Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.404663 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-xhgkc"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.448894 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftjvm\" (UniqueName: \"kubernetes.io/projected/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-kube-api-access-ftjvm\") pod \"novacell10c56-account-delete-6l7tk\" (UID: \"a81fb5f5-33d2-4da6-86a6-d2f248a3364f\") " pod="openstack/novacell10c56-account-delete-6l7tk" Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.448967 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-operator-scripts\") pod \"novacell10c56-account-delete-6l7tk\" (UID: \"a81fb5f5-33d2-4da6-86a6-d2f248a3364f\") " pod="openstack/novacell10c56-account-delete-6l7tk" Nov 28 13:44:08 crc kubenswrapper[4857]: E1128 13:44:08.449170 4857 configmap.go:193] Couldn't get configMap 
openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Nov 28 13:44:08 crc kubenswrapper[4857]: E1128 13:44:08.449220 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-operator-scripts podName:a81fb5f5-33d2-4da6-86a6-d2f248a3364f nodeName:}" failed. No retries permitted until 2025-11-28 13:44:08.949203057 +0000 UTC m=+1540.976578214 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-operator-scripts") pod "novacell10c56-account-delete-6l7tk" (UID: "a81fb5f5-33d2-4da6-86a6-d2f248a3364f") : configmap "openstack-cell1-scripts" not found Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.463924 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell10c56-account-delete-6l7tk"] Nov 28 13:44:08 crc kubenswrapper[4857]: E1128 13:44:08.471271 4857 projected.go:194] Error preparing data for projected volume kube-api-access-ftjvm for pod openstack/novacell10c56-account-delete-6l7tk: failed to fetch token: serviceaccounts "galera-openstack-cell1" not found Nov 28 13:44:08 crc kubenswrapper[4857]: E1128 13:44:08.471332 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-kube-api-access-ftjvm podName:a81fb5f5-33d2-4da6-86a6-d2f248a3364f nodeName:}" failed. No retries permitted until 2025-11-28 13:44:08.971313199 +0000 UTC m=+1540.998688366 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-ftjvm" (UniqueName: "kubernetes.io/projected/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-kube-api-access-ftjvm") pod "novacell10c56-account-delete-6l7tk" (UID: "a81fb5f5-33d2-4da6-86a6-d2f248a3364f") : failed to fetch token: serviceaccounts "galera-openstack-cell1" not found Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.491365 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novacell032fc-account-delete-xk7xx"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.492871 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/novacell032fc-account-delete-xk7xx" Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.505817 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell032fc-account-delete-xk7xx"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.530245 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:44:08 crc kubenswrapper[4857]: E1128 13:44:08.530656 4857 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err="command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: " execCommand=["/usr/share/ovn/scripts/ovn-ctl","stop_controller"] containerName="ovn-controller" pod="openstack/ovn-controller-t99ql" message="Exiting ovn-controller (1) " Nov 28 13:44:08 crc kubenswrapper[4857]: E1128 13:44:08.530688 4857 kuberuntime_container.go:691] "PreStop hook failed" err="command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: " pod="openstack/ovn-controller-t99ql" podUID="b1f7e362-6e6b-4636-b551-4533ad037811" containerName="ovn-controller" containerID="cri-o://015bbd0a1a1e9fb405214fe7a35a6c512629833b2d306bd11d97dfd7b5021dee" Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.530676 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="7bee7127-9367-4882-8ab1-0493128d2641" containerName="glance-log" containerID="cri-o://361acb609316369ca05f319244bbf84ef779ab12c43ab51d140a9f1785789d5e" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.530722 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-t99ql" podUID="b1f7e362-6e6b-4636-b551-4533ad037811" containerName="ovn-controller" containerID="cri-o://015bbd0a1a1e9fb405214fe7a35a6c512629833b2d306bd11d97dfd7b5021dee" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.531007 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="7bee7127-9367-4882-8ab1-0493128d2641" containerName="glance-httpd" containerID="cri-o://15caeb74f903a78a3ff675fa24fc2fa63c9da6eab92af97c459eb92425c7c093" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.548580 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7d4894d65-gqnvs"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.549195 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7d4894d65-gqnvs" podUID="151aff2f-7aaa-4964-8f75-51c8faf86397" containerName="neutron-httpd" containerID="cri-o://ec785d0624d75a82e22bd01f7edfc8b3b369f0fa8f2251c36725e8484e0c04f0" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.549138 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7d4894d65-gqnvs" podUID="151aff2f-7aaa-4964-8f75-51c8faf86397" containerName="neutron-api" containerID="cri-o://a6df09e46a84bd7457d48eb96b91f30ec9076cb7712cd5e8e714009a5e5ee6d2" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.551337 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmb7f\" (UniqueName: \"kubernetes.io/projected/ad7bc32b-e1f1-4ce5-a094-56f37d676131-kube-api-access-lmb7f\") pod \"novacell032fc-account-delete-xk7xx\" (UID: \"ad7bc32b-e1f1-4ce5-a094-56f37d676131\") " 
pod="openstack/novacell032fc-account-delete-xk7xx" Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.551365 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ad7bc32b-e1f1-4ce5-a094-56f37d676131-operator-scripts\") pod \"novacell032fc-account-delete-xk7xx\" (UID: \"ad7bc32b-e1f1-4ce5-a094-56f37d676131\") " pod="openstack/novacell032fc-account-delete-xk7xx" Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.576245 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.576648 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="account-server" containerID="cri-o://43f3af2bcb6a92ec4e0c79358397d8a0e3515b9b8ec39a557f85c39ba849f2e2" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.576989 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="object-auditor" containerID="cri-o://c3ca6f2469bf537185f1bdcbca3c0daa0bea4b5850c553e3aa9fc5b77b64d67a" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.576973 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="object-updater" containerID="cri-o://ee780d1664f73c1b0efc34f94bcba32ef69c9316883ef0a536cf48cc92544c85" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.577035 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="object-replicator" containerID="cri-o://1e96365ccd71754557edeab3d45001b8fe49eb13fc19f14f1ba33c6eb2378fc2" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.577066 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="object-server" containerID="cri-o://ce54c7d58ad42d61d735cc7c28384296c4ccdf392def1ceb2994e2fc57811e5e" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.577116 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="container-updater" containerID="cri-o://f4326c863e90d24d084dbd8c33e41f8c3206bed85eb21a3ea86b5f28906b546e" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.577133 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="account-auditor" containerID="cri-o://c31f95cb1b9a065f105c67a07bd5d5b7cf66901a282cda1c3bec560e21d74414" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.577104 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="account-reaper" containerID="cri-o://cba71b9ab843952cf7d72667e396c7374c1e7e44e8883fc3704df6fae16f5f38" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.577166 4857 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openstack/swift-storage-0" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="account-replicator" containerID="cri-o://470dd259d2efea986a51d98e27e81921a7309bef3934a73e5e73feb96d784778" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.577182 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="container-auditor" containerID="cri-o://a2fc2baa7d0114402b84cd8afc19e3b6af384d6ec48988a881766a0605e4b9fa" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.577232 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="container-replicator" containerID="cri-o://ac8cee2bfbd683e0bf23daa4541d27abead299ecd058365d22a491cf7e370d73" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.577286 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="rsync" containerID="cri-o://ca6e4b07ddb5ce36fc245cda1d9e032e507ab8f51871ae61fc6d91bf3d94fbcc" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.577320 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="container-server" containerID="cri-o://7c06071c7ab94c9d389a4416468a8dae45a8d25bf695e4f1589bbb97f67b9ff8" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.577350 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="swift-recon-cron" containerID="cri-o://7b7ec0a8594688d16e29d5dbc41ca846eb39e0c6507989d1e1ae16d15e9b54b4" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.577389 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="object-expirer" containerID="cri-o://4d8e6435aaf1596d23240a79db43f33846ba61b7ae4b65eb49e14339421d4856" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.622227 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.622560 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="a53cec78-89c3-4495-8af6-4caf4f018cc1" containerName="cinder-scheduler" containerID="cri-o://2b1f1cfc83df026dae7bf7bf7c447aef3e892986be530a90b20b933b8fe1c77c" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.623030 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="a53cec78-89c3-4495-8af6-4caf4f018cc1" containerName="probe" containerID="cri-o://5ac6adaa76a02bc0a74df277af75098ac24f5239a90ad5f23966871efb74d2a3" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.638683 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-fcj6h"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.650124 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.650365 4857 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="4477c075-9151-49cc-bb52-82dc34ea46ec" containerName="cinder-api-log" containerID="cri-o://e24143c91b4a17a69c27afa164bb157bee14c4f0597ed2fa5ef6a42ffe793925" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.650417 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="4477c075-9151-49cc-bb52-82dc34ea46ec" containerName="cinder-api" containerID="cri-o://4dd7dcf6024fd47fb7c4424b294f5cadc4f936ab98e05bb09fe4f5e3d7651e94" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.655389 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmb7f\" (UniqueName: \"kubernetes.io/projected/ad7bc32b-e1f1-4ce5-a094-56f37d676131-kube-api-access-lmb7f\") pod \"novacell032fc-account-delete-xk7xx\" (UID: \"ad7bc32b-e1f1-4ce5-a094-56f37d676131\") " pod="openstack/novacell032fc-account-delete-xk7xx" Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.655447 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ad7bc32b-e1f1-4ce5-a094-56f37d676131-operator-scripts\") pod \"novacell032fc-account-delete-xk7xx\" (UID: \"ad7bc32b-e1f1-4ce5-a094-56f37d676131\") " pod="openstack/novacell032fc-account-delete-xk7xx" Nov 28 13:44:08 crc kubenswrapper[4857]: E1128 13:44:08.655809 4857 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 28 13:44:08 crc kubenswrapper[4857]: E1128 13:44:08.655855 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-config-data podName:71cc1f00-1a63-428e-8f12-2136ab077860 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:10.655840381 +0000 UTC m=+1542.683215548 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-config-data") pod "rabbitmq-cell1-server-0" (UID: "71cc1f00-1a63-428e-8f12-2136ab077860") : configmap "rabbitmq-cell1-config-data" not found Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.656924 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ad7bc32b-e1f1-4ce5-a094-56f37d676131-operator-scripts\") pod \"novacell032fc-account-delete-xk7xx\" (UID: \"ad7bc32b-e1f1-4ce5-a094-56f37d676131\") " pod="openstack/novacell032fc-account-delete-xk7xx" Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.667404 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-fcj6h"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.691893 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmb7f\" (UniqueName: \"kubernetes.io/projected/ad7bc32b-e1f1-4ce5-a094-56f37d676131-kube-api-access-lmb7f\") pod \"novacell032fc-account-delete-xk7xx\" (UID: \"ad7bc32b-e1f1-4ce5-a094-56f37d676131\") " pod="openstack/novacell032fc-account-delete-xk7xx" Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.700974 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-dx7fm"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.711859 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-dx7fm"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.724980 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glanceeef9-account-delete-chvxh"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.763406 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.820762 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-56664b65dc-mkdgh"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.820992 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-56664b65dc-mkdgh" podUID="f411fba7-d7b2-4d97-9388-c1b6f57e8328" containerName="barbican-worker-log" containerID="cri-o://8b3ff8b7cb9bbbd5d33a06e4dc7773db3800416089a3b589b96e2930ebcb5b38" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.821255 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-56664b65dc-mkdgh" podUID="f411fba7-d7b2-4d97-9388-c1b6f57e8328" containerName="barbican-worker" containerID="cri-o://df7bca3ad7fcc3cc2ac1df9b77614d94b72b4db2a23294091359d1e948b3577e" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.846013 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-k7b77_8bb8cc13-eda7-4c41-9878-77ddabd55f4b/openstack-network-exporter/0.log" Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.846049 4857 generic.go:334] "Generic (PLEG): container finished" podID="8bb8cc13-eda7-4c41-9878-77ddabd55f4b" containerID="c9dfde4fc40c233928885d1fb977721a4b1687b44081115fcf5a00db9ce6907f" exitCode=2 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.846098 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-k7b77" 
event={"ID":"8bb8cc13-eda7-4c41-9878-77ddabd55f4b","Type":"ContainerDied","Data":"c9dfde4fc40c233928885d1fb977721a4b1687b44081115fcf5a00db9ce6907f"} Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.849935 4857 generic.go:334] "Generic (PLEG): container finished" podID="07621208-d831-4470-908c-76084c830753" containerID="f53ded071ea01c8120bc4be89662f272bd544a870cac413afe982b426f8f618a" exitCode=137 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.854049 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-9689bdb94-frvhg"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.854292 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" podUID="0d2e145c-5068-4dff-a35e-14fe385cdcf2" containerName="barbican-keystone-listener-log" containerID="cri-o://bf2b4d3b1fd6b8f1241149d7a1019420f1580f221c1eb7ad343a94db93716eb4" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.855118 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" podUID="0d2e145c-5068-4dff-a35e-14fe385cdcf2" containerName="barbican-keystone-listener" containerID="cri-o://89d728886b576deecbaf0ff9d24f62808734b9aa348aaf482ac876e2835345d1" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.870393 4857 generic.go:334] "Generic (PLEG): container finished" podID="7bee7127-9367-4882-8ab1-0493128d2641" containerID="361acb609316369ca05f319244bbf84ef779ab12c43ab51d140a9f1785789d5e" exitCode=143 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.870445 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"7bee7127-9367-4882-8ab1-0493128d2641","Type":"ContainerDied","Data":"361acb609316369ca05f319244bbf84ef779ab12c43ab51d140a9f1785789d5e"} Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.870602 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="71cc1f00-1a63-428e-8f12-2136ab077860" containerName="rabbitmq" containerID="cri-o://72d325a6ac77417281a4f0e4c5deaeb2d676cf4b75f4ac8be5b905a3b744677c" gracePeriod=604800 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.904496 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_6133e02f-8ece-4b6b-ac4a-c3871e017c1e/ovsdbserver-nb/0.log" Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.904812 4857 generic.go:334] "Generic (PLEG): container finished" podID="6133e02f-8ece-4b6b-ac4a-c3871e017c1e" containerID="7f7bc0064f521471cf62c8788f876e9c2ad9aae9c8e92b24025dd3c24bcd9aaf" exitCode=143 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.904860 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"6133e02f-8ece-4b6b-ac4a-c3871e017c1e","Type":"ContainerDied","Data":"7f7bc0064f521471cf62c8788f876e9c2ad9aae9c8e92b24025dd3c24bcd9aaf"} Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.909408 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-575548d9c6-4zx6z"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.911945 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-575548d9c6-4zx6z" podUID="0d0c82d5-b320-444c-a4d9-838ca3097157" containerName="barbican-api-log" 
containerID="cri-o://d880cc69cc93c55dd123da2ed1ba8cf195b6e491b2fba33f24d18a403279c8c6" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.912057 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-575548d9c6-4zx6z" podUID="0d0c82d5-b320-444c-a4d9-838ca3097157" containerName="barbican-api" containerID="cri-o://6307f97c800ac6b026e60dbaa702231a4783caba353d39fc8956c6ce72d5e01e" gracePeriod=30 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.925819 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="e2fec95b-4e40-4761-9d14-6abfeb78d9c0" containerName="ovsdbserver-sb" containerID="cri-o://bc6f23278057204397c35369662b163cc069bd8443a701043e5b2fc7356a153a" gracePeriod=300 Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.952519 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placementcdcd-account-delete-h5qc4"] Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.964413 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-operator-scripts\") pod \"novacell10c56-account-delete-6l7tk\" (UID: \"a81fb5f5-33d2-4da6-86a6-d2f248a3364f\") " pod="openstack/novacell10c56-account-delete-6l7tk" Nov 28 13:44:08 crc kubenswrapper[4857]: E1128 13:44:08.964710 4857 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Nov 28 13:44:08 crc kubenswrapper[4857]: E1128 13:44:08.964778 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-operator-scripts podName:a81fb5f5-33d2-4da6-86a6-d2f248a3364f nodeName:}" failed. No retries permitted until 2025-11-28 13:44:09.964764124 +0000 UTC m=+1541.992139281 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-operator-scripts") pod "novacell10c56-account-delete-6l7tk" (UID: "a81fb5f5-33d2-4da6-86a6-d2f248a3364f") : configmap "openstack-cell1-scripts" not found Nov 28 13:44:08 crc kubenswrapper[4857]: I1128 13:44:08.985156 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.040877 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.041163 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="310b8699-5d0c-4cce-b8fd-90ccedc2ce85" containerName="nova-scheduler-scheduler" containerID="cri-o://8b4e711ca231fe7452ef2052a4b12b122b8a749ee4670a4b691165e5f548e102" gracePeriod=30 Nov 28 13:44:09 crc kubenswrapper[4857]: E1128 13:44:09.082099 4857 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< Nov 28 13:44:09 crc kubenswrapper[4857]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 28 13:44:09 crc kubenswrapper[4857]: + source /usr/local/bin/container-scripts/functions Nov 28 13:44:09 crc kubenswrapper[4857]: ++ OVNBridge=br-int Nov 28 13:44:09 crc kubenswrapper[4857]: ++ OVNRemote=tcp:localhost:6642 Nov 28 13:44:09 crc kubenswrapper[4857]: ++ OVNEncapType=geneve Nov 28 13:44:09 crc kubenswrapper[4857]: ++ OVNAvailabilityZones= Nov 28 13:44:09 crc kubenswrapper[4857]: ++ EnableChassisAsGateway=true Nov 28 13:44:09 crc kubenswrapper[4857]: ++ PhysicalNetworks= Nov 28 13:44:09 crc kubenswrapper[4857]: ++ OVNHostName= Nov 28 13:44:09 crc kubenswrapper[4857]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 28 13:44:09 crc kubenswrapper[4857]: ++ ovs_dir=/var/lib/openvswitch Nov 28 13:44:09 crc kubenswrapper[4857]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 28 13:44:09 crc kubenswrapper[4857]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 28 13:44:09 crc kubenswrapper[4857]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 13:44:09 crc kubenswrapper[4857]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 13:44:09 crc kubenswrapper[4857]: + sleep 0.5 Nov 28 13:44:09 crc kubenswrapper[4857]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 13:44:09 crc kubenswrapper[4857]: + sleep 0.5 Nov 28 13:44:09 crc kubenswrapper[4857]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 13:44:09 crc kubenswrapper[4857]: + cleanup_ovsdb_server_semaphore Nov 28 13:44:09 crc kubenswrapper[4857]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 13:44:09 crc kubenswrapper[4857]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 28 13:44:09 crc kubenswrapper[4857]: > execCommand=["/usr/local/bin/container-scripts/stop-ovsdb-server.sh"] containerName="ovsdb-server" pod="openstack/ovn-controller-ovs-ph2cf" message=< Nov 28 13:44:09 crc kubenswrapper[4857]: Exiting ovsdb-server (5) ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 28 13:44:09 crc kubenswrapper[4857]: + source /usr/local/bin/container-scripts/functions Nov 28 13:44:09 crc kubenswrapper[4857]: ++ OVNBridge=br-int Nov 28 13:44:09 crc kubenswrapper[4857]: ++ OVNRemote=tcp:localhost:6642 Nov 28 13:44:09 crc kubenswrapper[4857]: ++ OVNEncapType=geneve Nov 28 13:44:09 crc kubenswrapper[4857]: ++ OVNAvailabilityZones= Nov 28 13:44:09 crc kubenswrapper[4857]: ++ EnableChassisAsGateway=true Nov 28 13:44:09 crc kubenswrapper[4857]: ++ PhysicalNetworks= Nov 28 13:44:09 crc kubenswrapper[4857]: ++ OVNHostName= Nov 28 13:44:09 crc kubenswrapper[4857]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 28 13:44:09 crc kubenswrapper[4857]: ++ ovs_dir=/var/lib/openvswitch Nov 28 13:44:09 crc kubenswrapper[4857]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 28 13:44:09 crc kubenswrapper[4857]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 28 13:44:09 crc kubenswrapper[4857]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 13:44:09 crc kubenswrapper[4857]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 13:44:09 crc kubenswrapper[4857]: + sleep 0.5 Nov 28 13:44:09 crc kubenswrapper[4857]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 13:44:09 crc kubenswrapper[4857]: + sleep 0.5 Nov 28 13:44:09 crc kubenswrapper[4857]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 13:44:09 crc kubenswrapper[4857]: + cleanup_ovsdb_server_semaphore Nov 28 13:44:09 crc kubenswrapper[4857]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 13:44:09 crc kubenswrapper[4857]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 28 13:44:09 crc kubenswrapper[4857]: > Nov 28 13:44:09 crc kubenswrapper[4857]: E1128 13:44:09.082135 4857 kuberuntime_container.go:691] "PreStop hook failed" err=< Nov 28 13:44:09 crc kubenswrapper[4857]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 28 13:44:09 crc kubenswrapper[4857]: + source /usr/local/bin/container-scripts/functions Nov 28 13:44:09 crc kubenswrapper[4857]: ++ OVNBridge=br-int Nov 28 13:44:09 crc kubenswrapper[4857]: ++ OVNRemote=tcp:localhost:6642 Nov 28 13:44:09 crc kubenswrapper[4857]: ++ OVNEncapType=geneve Nov 28 13:44:09 crc kubenswrapper[4857]: ++ OVNAvailabilityZones= Nov 28 13:44:09 crc kubenswrapper[4857]: ++ EnableChassisAsGateway=true Nov 28 13:44:09 crc kubenswrapper[4857]: ++ PhysicalNetworks= Nov 28 13:44:09 crc kubenswrapper[4857]: ++ OVNHostName= Nov 28 13:44:09 crc kubenswrapper[4857]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 28 13:44:09 crc kubenswrapper[4857]: ++ ovs_dir=/var/lib/openvswitch Nov 28 13:44:09 crc kubenswrapper[4857]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 28 13:44:09 crc kubenswrapper[4857]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 28 13:44:09 crc kubenswrapper[4857]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 13:44:09 crc kubenswrapper[4857]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 13:44:09 crc kubenswrapper[4857]: + sleep 0.5 Nov 28 13:44:09 crc kubenswrapper[4857]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 13:44:09 crc kubenswrapper[4857]: + sleep 0.5 Nov 28 13:44:09 crc kubenswrapper[4857]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 13:44:09 crc kubenswrapper[4857]: + cleanup_ovsdb_server_semaphore Nov 28 13:44:09 crc kubenswrapper[4857]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 13:44:09 crc kubenswrapper[4857]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 28 13:44:09 crc kubenswrapper[4857]: > pod="openstack/ovn-controller-ovs-ph2cf" podUID="c80a8609-29af-4833-856c-ee4094abcc0c" containerName="ovsdb-server" containerID="cri-o://7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c" Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.082189 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-ph2cf" podUID="c80a8609-29af-4833-856c-ee4094abcc0c" containerName="ovsdb-server" containerID="cri-o://7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c" gracePeriod=29 Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.082261 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftjvm\" (UniqueName: \"kubernetes.io/projected/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-kube-api-access-ftjvm\") pod \"novacell10c56-account-delete-6l7tk\" (UID: \"a81fb5f5-33d2-4da6-86a6-d2f248a3364f\") " pod="openstack/novacell10c56-account-delete-6l7tk" Nov 28 13:44:09 crc kubenswrapper[4857]: E1128 13:44:09.088684 4857 projected.go:194] Error preparing data for projected volume kube-api-access-ftjvm for pod openstack/novacell10c56-account-delete-6l7tk: failed to fetch token: serviceaccounts "galera-openstack-cell1" not found Nov 28 13:44:09 crc kubenswrapper[4857]: E1128 13:44:09.088773 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-kube-api-access-ftjvm podName:a81fb5f5-33d2-4da6-86a6-d2f248a3364f nodeName:}" failed. No retries permitted until 2025-11-28 13:44:10.088723352 +0000 UTC m=+1542.116098519 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-ftjvm" (UniqueName: "kubernetes.io/projected/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-kube-api-access-ftjvm") pod "novacell10c56-account-delete-6l7tk" (UID: "a81fb5f5-33d2-4da6-86a6-d2f248a3364f") : failed to fetch token: serviceaccounts "galera-openstack-cell1" not found Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.132473 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-ph2cf" podUID="c80a8609-29af-4833-856c-ee4094abcc0c" containerName="ovs-vswitchd" containerID="cri-o://8daa77c6a75ac309fb1164e6118c9ceda262a9d1e1c41ac9229dc01914b7ee0a" gracePeriod=29 Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.133707 4857 generic.go:334] "Generic (PLEG): container finished" podID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerID="a2fc2baa7d0114402b84cd8afc19e3b6af384d6ec48988a881766a0605e4b9fa" exitCode=0 Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.133728 4857 generic.go:334] "Generic (PLEG): container finished" podID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerID="c3ca6f2469bf537185f1bdcbca3c0daa0bea4b5850c553e3aa9fc5b77b64d67a" exitCode=0 Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.133737 4857 generic.go:334] "Generic (PLEG): container finished" podID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerID="1e96365ccd71754557edeab3d45001b8fe49eb13fc19f14f1ba33c6eb2378fc2" exitCode=0 Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.133744 4857 generic.go:334] "Generic (PLEG): container finished" podID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerID="cba71b9ab843952cf7d72667e396c7374c1e7e44e8883fc3704df6fae16f5f38" exitCode=0 Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.133766 4857 generic.go:334] "Generic (PLEG): container finished" podID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerID="470dd259d2efea986a51d98e27e81921a7309bef3934a73e5e73feb96d784778" exitCode=0 Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.133825 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerDied","Data":"a2fc2baa7d0114402b84cd8afc19e3b6af384d6ec48988a881766a0605e4b9fa"} Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.133850 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerDied","Data":"c3ca6f2469bf537185f1bdcbca3c0daa0bea4b5850c553e3aa9fc5b77b64d67a"} Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.133862 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerDied","Data":"1e96365ccd71754557edeab3d45001b8fe49eb13fc19f14f1ba33c6eb2378fc2"} Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.133871 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerDied","Data":"cba71b9ab843952cf7d72667e396c7374c1e7e44e8883fc3704df6fae16f5f38"} Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.133880 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerDied","Data":"470dd259d2efea986a51d98e27e81921a7309bef3934a73e5e73feb96d784778"} Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.171592 4857 generic.go:334] "Generic 
(PLEG): container finished" podID="b1f7e362-6e6b-4636-b551-4533ad037811" containerID="015bbd0a1a1e9fb405214fe7a35a6c512629833b2d306bd11d97dfd7b5021dee" exitCode=0 Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.171658 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-t99ql" event={"ID":"b1f7e362-6e6b-4636-b551-4533ad037811","Type":"ContainerDied","Data":"015bbd0a1a1e9fb405214fe7a35a6c512629833b2d306bd11d97dfd7b5021dee"} Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.178572 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 13:44:09 crc kubenswrapper[4857]: E1128 13:44:09.186636 4857 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 13:44:09 crc kubenswrapper[4857]: E1128 13:44:09.186865 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-config-data podName:cfbd0457-d459-4bf2-bdaf-8b61db5cce65 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:11.186851531 +0000 UTC m=+1543.214226688 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-config-data") pod "rabbitmq-server-0" (UID: "cfbd0457-d459-4bf2-bdaf-8b61db5cce65") : configmap "rabbitmq-config-data" not found Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.199215 4857 generic.go:334] "Generic (PLEG): container finished" podID="3a952329-a8d9-432d-ac5b-d88b7e2ede6b" containerID="990eadd5834f267197096c5bc4a36f6e0524a5b8386ca4956f7f56c8c34c8ce5" exitCode=143 Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.199322 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3a952329-a8d9-432d-ac5b-d88b7e2ede6b","Type":"ContainerDied","Data":"990eadd5834f267197096c5bc4a36f6e0524a5b8386ca4956f7f56c8c34c8ce5"} Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.210190 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.213023 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="64da16e3-099d-4def-9656-91f40d64672f" containerName="nova-api-log" containerID="cri-o://ee074130ed95276ff4a950681c7df3344a6e4c3aa86435eb4b8f9e471126f272" gracePeriod=30 Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.213622 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="64da16e3-099d-4def-9656-91f40d64672f" containerName="nova-api-api" containerID="cri-o://1146e3ec8a4d803ee31e0a88958bb4723468c3f9bc7e9a7d393734acda6d6b4a" gracePeriod=30 Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.232931 4857 generic.go:334] "Generic (PLEG): container finished" podID="946c0669-4c99-46b7-a9ff-437042383642" containerID="9f2936fe928f6000c1df2fe80515f9fd71cc2a258636283c70afbe2ab56dcf0b" exitCode=143 Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.233539 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-749fd8cf96-rbd6r" event={"ID":"946c0669-4c99-46b7-a9ff-437042383642","Type":"ContainerDied","Data":"9f2936fe928f6000c1df2fe80515f9fd71cc2a258636283c70afbe2ab56dcf0b"} Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.254010 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/nova-metadata-0"] Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.254421 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="ea2604b9-e3ca-4145-b8c3-42a9b8e3b286" containerName="nova-metadata-log" containerID="cri-o://e575a5748441c404e5228a5c2146f98ab1fd6c5ae67eb9523fedd879f306a6a7" gracePeriod=30 Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.263996 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="ea2604b9-e3ca-4145-b8c3-42a9b8e3b286" containerName="nova-metadata-metadata" containerID="cri-o://ec5937438716528a8aa131c5d6bf8c9a57f6a24f30318571c52d136b077dfcf7" gracePeriod=30 Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.274447 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-m8bx4"] Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.281962 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_e2fec95b-4e40-4761-9d14-6abfeb78d9c0/ovsdbserver-sb/0.log" Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.282115 4857 generic.go:334] "Generic (PLEG): container finished" podID="e2fec95b-4e40-4761-9d14-6abfeb78d9c0" containerID="8dea0ac70575cce5895b3d523d663e080996fff826c13ed3d4f241bb7bf27649" exitCode=2 Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.282303 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"e2fec95b-4e40-4761-9d14-6abfeb78d9c0","Type":"ContainerDied","Data":"8dea0ac70575cce5895b3d523d663e080996fff826c13ed3d4f241bb7bf27649"} Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.286312 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-0c56-account-create-update-6lpj4"] Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.296475 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-m8bx4"] Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.314149 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-0c56-account-create-update-6lpj4"] Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.316270 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell10c56-account-delete-6l7tk"] Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.325265 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.325480 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="b6a593cc-74b3-4a02-ba7a-f4c5d7400476" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://a840981835cecbc52064fb805056a67f1d699c2bf561f689da71d182309a6ea3" gracePeriod=30 Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.329461 4857 generic.go:334] "Generic (PLEG): container finished" podID="0cb677df-7237-4b82-8806-d7abedfad40c" containerID="54ebf8119e8b85e98e03f36c99c69b277451e9d96ff0dda0b092e460eb535292" exitCode=0 Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.329494 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k" event={"ID":"0cb677df-7237-4b82-8806-d7abedfad40c","Type":"ContainerDied","Data":"54ebf8119e8b85e98e03f36c99c69b277451e9d96ff0dda0b092e460eb535292"} Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.330949 4857 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="cfbd0457-d459-4bf2-bdaf-8b61db5cce65" containerName="rabbitmq" containerID="cri-o://b4dc40ec2aafb3b05e54fb73bbf1e3fb91135c9bbf7ec2c351e4ea6cea29e654" gracePeriod=604800 Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.332244 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-rhznt"] Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.341042 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-rhznt"] Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.355865 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.356074 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="f7bf9e28-fd40-4b0d-aac9-995eff12a115" containerName="nova-cell1-conductor-conductor" containerID="cri-o://af2a21437b5950c07391db6d069bd153d9b422fd5daa52cd346a87417d643f35" gracePeriod=30 Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.382825 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.383078 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd" containerName="nova-cell0-conductor-conductor" containerID="cri-o://165d4ed1ed3f722b3e2c4f5769e7c056c9eafbeeed14d27d5233ab4f09a4ef4d" gracePeriod=30 Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.390085 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fb46r"] Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.404295 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fb46r"] Nov 28 13:44:09 crc kubenswrapper[4857]: E1128 13:44:09.407933 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of bc6f23278057204397c35369662b163cc069bd8443a701043e5b2fc7356a153a is running failed: container process not found" containerID="bc6f23278057204397c35369662b163cc069bd8443a701043e5b2fc7356a153a" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 28 13:44:09 crc kubenswrapper[4857]: E1128 13:44:09.410314 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of bc6f23278057204397c35369662b163cc069bd8443a701043e5b2fc7356a153a is running failed: container process not found" containerID="bc6f23278057204397c35369662b163cc069bd8443a701043e5b2fc7356a153a" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 28 13:44:09 crc kubenswrapper[4857]: E1128 13:44:09.413155 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of bc6f23278057204397c35369662b163cc069bd8443a701043e5b2fc7356a153a is running failed: container process not found" containerID="bc6f23278057204397c35369662b163cc069bd8443a701043e5b2fc7356a153a" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 28 13:44:09 crc kubenswrapper[4857]: E1128 13:44:09.413211 4857 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID 
of bc6f23278057204397c35369662b163cc069bd8443a701043e5b2fc7356a153a is running failed: container process not found" probeType="Readiness" pod="openstack/ovsdbserver-sb-0" podUID="e2fec95b-4e40-4761-9d14-6abfeb78d9c0" containerName="ovsdbserver-sb" Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.466047 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron5867-account-delete-58twd"] Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.539940 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="41687469-06d7-47ab-ad25-d32df165e1e2" containerName="galera" containerID="cri-o://9edd8ca732119343257da06c9d3c8090ac7032d415e0af5cc821df9c9c20bf76" gracePeriod=30 Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.682303 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novaapi7cc9-account-delete-qjqg5" Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.748324 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-cell1-novncproxy-0" podUID="b6a593cc-74b3-4a02-ba7a-f4c5d7400476" containerName="nova-cell1-novncproxy-novncproxy" probeResult="failure" output="Get \"https://10.217.0.195:6080/vnc_lite.html\": dial tcp 10.217.0.195:6080: connect: connection refused" Nov 28 13:44:09 crc kubenswrapper[4857]: E1128 13:44:09.830539 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-ftjvm operator-scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/novacell10c56-account-delete-6l7tk" podUID="a81fb5f5-33d2-4da6-86a6-d2f248a3364f" Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.833957 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell032fc-account-delete-xk7xx" Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.837195 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_6133e02f-8ece-4b6b-ac4a-c3871e017c1e/ovsdbserver-nb/0.log" Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.837266 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.871190 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k" Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.873313 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-k7b77_8bb8cc13-eda7-4c41-9878-77ddabd55f4b/openstack-network-exporter/0.log" Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.873353 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-k7b77" Nov 28 13:44:09 crc kubenswrapper[4857]: I1128 13:44:09.878585 4857 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.008895 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-ovsdbserver-sb\") pod \"0cb677df-7237-4b82-8806-d7abedfad40c\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") "
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.008959 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-ovsdb-rundir\") pod \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") "
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.008998 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-combined-ca-bundle\") pod \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") "
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.009023 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/07621208-d831-4470-908c-76084c830753-openstack-config\") pod \"07621208-d831-4470-908c-76084c830753\" (UID: \"07621208-d831-4470-908c-76084c830753\") "
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.009063 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-dns-svc\") pod \"0cb677df-7237-4b82-8806-d7abedfad40c\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") "
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.009092 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rwgcx\" (UniqueName: \"kubernetes.io/projected/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-kube-api-access-rwgcx\") pod \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") "
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.009126 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-config\") pod \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") "
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.009143 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9zkdm\" (UniqueName: \"kubernetes.io/projected/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-kube-api-access-9zkdm\") pod \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") "
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.009161 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-dns-swift-storage-0\") pod \"0cb677df-7237-4b82-8806-d7abedfad40c\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") "
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.009190 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-82xwn\" (UniqueName: \"kubernetes.io/projected/0cb677df-7237-4b82-8806-d7abedfad40c-kube-api-access-82xwn\") pod \"0cb677df-7237-4b82-8806-d7abedfad40c\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") "
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.009215 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xkvx\" (UniqueName: \"kubernetes.io/projected/07621208-d831-4470-908c-76084c830753-kube-api-access-9xkvx\") pod \"07621208-d831-4470-908c-76084c830753\" (UID: \"07621208-d831-4470-908c-76084c830753\") "
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.009234 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/07621208-d831-4470-908c-76084c830753-openstack-config-secret\") pod \"07621208-d831-4470-908c-76084c830753\" (UID: \"07621208-d831-4470-908c-76084c830753\") "
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.009250 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-scripts\") pod \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") "
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.009292 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-ovs-rundir\") pod \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") "
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.009310 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-metrics-certs-tls-certs\") pod \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") "
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.009328 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-config\") pod \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") "
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.009347 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07621208-d831-4470-908c-76084c830753-combined-ca-bundle\") pod \"07621208-d831-4470-908c-76084c830753\" (UID: \"07621208-d831-4470-908c-76084c830753\") "
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.009364 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-ovn-rundir\") pod \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\" (UID: \"8bb8cc13-eda7-4c41-9878-77ddabd55f4b\") "
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.009386 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-ovsdbserver-nb-tls-certs\") pod \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") "
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.009405 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-config\") pod \"0cb677df-7237-4b82-8806-d7abedfad40c\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") "
\"0cb677df-7237-4b82-8806-d7abedfad40c\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") " Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.009435 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-nb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.009457 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-metrics-certs-tls-certs\") pod \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.009483 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-combined-ca-bundle\") pod \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\" (UID: \"6133e02f-8ece-4b6b-ac4a-c3871e017c1e\") " Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.009507 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-ovsdbserver-nb\") pod \"0cb677df-7237-4b82-8806-d7abedfad40c\" (UID: \"0cb677df-7237-4b82-8806-d7abedfad40c\") " Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.009852 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-operator-scripts\") pod \"novacell10c56-account-delete-6l7tk\" (UID: \"a81fb5f5-33d2-4da6-86a6-d2f248a3364f\") " pod="openstack/novacell10c56-account-delete-6l7tk" Nov 28 13:44:10 crc kubenswrapper[4857]: E1128 13:44:10.010051 4857 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Nov 28 13:44:10 crc kubenswrapper[4857]: E1128 13:44:10.010092 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-operator-scripts podName:a81fb5f5-33d2-4da6-86a6-d2f248a3364f nodeName:}" failed. No retries permitted until 2025-11-28 13:44:12.010077784 +0000 UTC m=+1544.037452941 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-operator-scripts") pod "novacell10c56-account-delete-6l7tk" (UID: "a81fb5f5-33d2-4da6-86a6-d2f248a3364f") : configmap "openstack-cell1-scripts" not found Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.010131 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-ovs-rundir" (OuterVolumeSpecName: "ovs-rundir") pod "8bb8cc13-eda7-4c41-9878-77ddabd55f4b" (UID: "8bb8cc13-eda7-4c41-9878-77ddabd55f4b"). InnerVolumeSpecName "ovs-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.010666 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-scripts" (OuterVolumeSpecName: "scripts") pod "6133e02f-8ece-4b6b-ac4a-c3871e017c1e" (UID: "6133e02f-8ece-4b6b-ac4a-c3871e017c1e"). 
InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.010833 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "8bb8cc13-eda7-4c41-9878-77ddabd55f4b" (UID: "8bb8cc13-eda7-4c41-9878-77ddabd55f4b"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.012079 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-config" (OuterVolumeSpecName: "config") pod "6133e02f-8ece-4b6b-ac4a-c3871e017c1e" (UID: "6133e02f-8ece-4b6b-ac4a-c3871e017c1e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.012951 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "6133e02f-8ece-4b6b-ac4a-c3871e017c1e" (UID: "6133e02f-8ece-4b6b-ac4a-c3871e017c1e"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.014080 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-config" (OuterVolumeSpecName: "config") pod "8bb8cc13-eda7-4c41-9878-77ddabd55f4b" (UID: "8bb8cc13-eda7-4c41-9878-77ddabd55f4b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.024853 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "ovndbcluster-nb-etc-ovn") pod "6133e02f-8ece-4b6b-ac4a-c3871e017c1e" (UID: "6133e02f-8ece-4b6b-ac4a-c3871e017c1e"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.049077 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-kube-api-access-9zkdm" (OuterVolumeSpecName: "kube-api-access-9zkdm") pod "8bb8cc13-eda7-4c41-9878-77ddabd55f4b" (UID: "8bb8cc13-eda7-4c41-9878-77ddabd55f4b"). InnerVolumeSpecName "kube-api-access-9zkdm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.070528 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0cb677df-7237-4b82-8806-d7abedfad40c-kube-api-access-82xwn" (OuterVolumeSpecName: "kube-api-access-82xwn") pod "0cb677df-7237-4b82-8806-d7abedfad40c" (UID: "0cb677df-7237-4b82-8806-d7abedfad40c"). InnerVolumeSpecName "kube-api-access-82xwn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.072659 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07621208-d831-4470-908c-76084c830753-kube-api-access-9xkvx" (OuterVolumeSpecName: "kube-api-access-9xkvx") pod "07621208-d831-4470-908c-76084c830753" (UID: "07621208-d831-4470-908c-76084c830753"). InnerVolumeSpecName "kube-api-access-9xkvx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.113732 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftjvm\" (UniqueName: \"kubernetes.io/projected/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-kube-api-access-ftjvm\") pod \"novacell10c56-account-delete-6l7tk\" (UID: \"a81fb5f5-33d2-4da6-86a6-d2f248a3364f\") " pod="openstack/novacell10c56-account-delete-6l7tk" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.114008 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.121704 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.121789 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9zkdm\" (UniqueName: \"kubernetes.io/projected/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-kube-api-access-9zkdm\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.121846 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-82xwn\" (UniqueName: \"kubernetes.io/projected/0cb677df-7237-4b82-8806-d7abedfad40c-kube-api-access-82xwn\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.121898 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xkvx\" (UniqueName: \"kubernetes.io/projected/07621208-d831-4470-908c-76084c830753-kube-api-access-9xkvx\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.121948 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.122010 4857 reconciler_common.go:293] "Volume detached for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-ovs-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.122066 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.122118 4857 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-ovn-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.122186 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.123044 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-kube-api-access-rwgcx" (OuterVolumeSpecName: "kube-api-access-rwgcx") pod "6133e02f-8ece-4b6b-ac4a-c3871e017c1e" (UID: "6133e02f-8ece-4b6b-ac4a-c3871e017c1e"). InnerVolumeSpecName "kube-api-access-rwgcx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: E1128 13:44:10.129741 4857 projected.go:194] Error preparing data for projected volume kube-api-access-ftjvm for pod openstack/novacell10c56-account-delete-6l7tk: failed to fetch token: serviceaccounts "galera-openstack-cell1" not found Nov 28 13:44:10 crc kubenswrapper[4857]: E1128 13:44:10.130091 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-kube-api-access-ftjvm podName:a81fb5f5-33d2-4da6-86a6-d2f248a3364f nodeName:}" failed. No retries permitted until 2025-11-28 13:44:12.130070728 +0000 UTC m=+1544.157445895 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-ftjvm" (UniqueName: "kubernetes.io/projected/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-kube-api-access-ftjvm") pod "novacell10c56-account-delete-6l7tk" (UID: "a81fb5f5-33d2-4da6-86a6-d2f248a3364f") : failed to fetch token: serviceaccounts "galera-openstack-cell1" not found Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.140525 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_e2fec95b-4e40-4761-9d14-6abfeb78d9c0/ovsdbserver-sb/0.log" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.140592 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.210455 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/07621208-d831-4470-908c-76084c830753-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "07621208-d831-4470-908c-76084c830753" (UID: "07621208-d831-4470-908c-76084c830753"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.230907 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rwgcx\" (UniqueName: \"kubernetes.io/projected/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-kube-api-access-rwgcx\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.230931 4857 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/07621208-d831-4470-908c-76084c830753-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.244807 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0cb677df-7237-4b82-8806-d7abedfad40c" (UID: "0cb677df-7237-4b82-8806-d7abedfad40c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.245450 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.264960 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8bb8cc13-eda7-4c41-9878-77ddabd55f4b" (UID: "8bb8cc13-eda7-4c41-9878-77ddabd55f4b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.340055 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-ovsdbserver-sb-tls-certs\") pod \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.340638 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-sb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.340768 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-config\") pod \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.340883 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-combined-ca-bundle\") pod \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.340972 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4qlz\" (UniqueName: \"kubernetes.io/projected/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-kube-api-access-v4qlz\") pod \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.341039 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-scripts\") pod \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.341127 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-metrics-certs-tls-certs\") pod \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.341210 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-ovsdb-rundir\") pod \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\" (UID: \"e2fec95b-4e40-4761-9d14-6abfeb78d9c0\") " Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.342588 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.342680 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-scripts" (OuterVolumeSpecName: "scripts") pod "e2fec95b-4e40-4761-9d14-6abfeb78d9c0" (UID: "e2fec95b-4e40-4761-9d14-6abfeb78d9c0"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.342699 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.342729 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-config" (OuterVolumeSpecName: "config") pod "e2fec95b-4e40-4761-9d14-6abfeb78d9c0" (UID: "e2fec95b-4e40-4761-9d14-6abfeb78d9c0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.342758 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.349309 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "e2fec95b-4e40-4761-9d14-6abfeb78d9c0" (UID: "e2fec95b-4e40-4761-9d14-6abfeb78d9c0"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.358459 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-k7b77_8bb8cc13-eda7-4c41-9878-77ddabd55f4b/openstack-network-exporter/0.log" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.358573 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-k7b77" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.366362 4857 generic.go:334] "Generic (PLEG): container finished" podID="64da16e3-099d-4def-9656-91f40d64672f" containerID="ee074130ed95276ff4a950681c7df3344a6e4c3aa86435eb4b8f9e471126f272" exitCode=143 Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.384909 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10b0ab88-8db9-4c9d-bc03-a9da374a33ca" path="/var/lib/kubelet/pods/10b0ab88-8db9-4c9d-bc03-a9da374a33ca/volumes" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.385156 4857 generic.go:334] "Generic (PLEG): container finished" podID="ea2604b9-e3ca-4145-b8c3-42a9b8e3b286" containerID="e575a5748441c404e5228a5c2146f98ab1fd6c5ae67eb9523fedd879f306a6a7" exitCode=143 Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.387368 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54d85556-ca88-4cbe-9aab-b5505d75d5ed" path="/var/lib/kubelet/pods/54d85556-ca88-4cbe-9aab-b5505d75d5ed/volumes" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.392528 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b35e465-15a4-4f5a-a53a-2fb23b2edeb7" path="/var/lib/kubelet/pods/7b35e465-15a4-4f5a-a53a-2fb23b2edeb7/volumes" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.392963 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07621208-d831-4470-908c-76084c830753-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "07621208-d831-4470-908c-76084c830753" (UID: "07621208-d831-4470-908c-76084c830753"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.393052 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c62325d-5f8d-4477-9369-5a39a3a0bfc8" path="/var/lib/kubelet/pods/7c62325d-5f8d-4477-9369-5a39a3a0bfc8/volumes" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.393713 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b208ca3d-5127-4e9d-ba17-a68dc507f085" path="/var/lib/kubelet/pods/b208ca3d-5127-4e9d-ba17-a68dc507f085/volumes" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.394080 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "ovndbcluster-sb-etc-ovn") pod "e2fec95b-4e40-4761-9d14-6abfeb78d9c0" (UID: "e2fec95b-4e40-4761-9d14-6abfeb78d9c0"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.394216 4857 generic.go:334] "Generic (PLEG): container finished" podID="a53cec78-89c3-4495-8af6-4caf4f018cc1" containerID="5ac6adaa76a02bc0a74df277af75098ac24f5239a90ad5f23966871efb74d2a3" exitCode=0 Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.394237 4857 generic.go:334] "Generic (PLEG): container finished" podID="a53cec78-89c3-4495-8af6-4caf4f018cc1" containerID="2b1f1cfc83df026dae7bf7bf7c447aef3e892986be530a90b20b933b8fe1c77c" exitCode=0 Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.397223 4857 generic.go:334] "Generic (PLEG): container finished" podID="f411fba7-d7b2-4d97-9388-c1b6f57e8328" containerID="df7bca3ad7fcc3cc2ac1df9b77614d94b72b4db2a23294091359d1e948b3577e" exitCode=0 Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.397245 4857 generic.go:334] "Generic (PLEG): container finished" podID="f411fba7-d7b2-4d97-9388-c1b6f57e8328" containerID="8b3ff8b7cb9bbbd5d33a06e4dc7773db3800416089a3b589b96e2930ebcb5b38" exitCode=143 Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.398313 4857 generic.go:334] "Generic (PLEG): container finished" podID="b6a593cc-74b3-4a02-ba7a-f4c5d7400476" containerID="a840981835cecbc52064fb805056a67f1d699c2bf561f689da71d182309a6ea3" exitCode=0 Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.400634 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f43bd8e8-a5d3-4575-894a-8df5746b831d" path="/var/lib/kubelet/pods/f43bd8e8-a5d3-4575-894a-8df5746b831d/volumes" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.406161 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff6d87b7-1400-461b-ab0c-e122e6e2a5e5" path="/var/lib/kubelet/pods/ff6d87b7-1400-461b-ab0c-e122e6e2a5e5/volumes" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.406974 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.415018 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-kube-api-access-v4qlz" (OuterVolumeSpecName: "kube-api-access-v4qlz") pod "e2fec95b-4e40-4761-9d14-6abfeb78d9c0" (UID: "e2fec95b-4e40-4761-9d14-6abfeb78d9c0"). InnerVolumeSpecName "kube-api-access-v4qlz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.418823 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_6133e02f-8ece-4b6b-ac4a-c3871e017c1e/ovsdbserver-nb/0.log" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.418951 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.444706 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4qlz\" (UniqueName: \"kubernetes.io/projected/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-kube-api-access-v4qlz\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.456459 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.456471 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.456483 4857 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/07621208-d831-4470-908c-76084c830753-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.456523 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.456532 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.492161 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_e2fec95b-4e40-4761-9d14-6abfeb78d9c0/ovsdbserver-sb/0.log" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.492270 4857 generic.go:334] "Generic (PLEG): container finished" podID="e2fec95b-4e40-4761-9d14-6abfeb78d9c0" containerID="bc6f23278057204397c35369662b163cc069bd8443a701043e5b2fc7356a153a" exitCode=143 Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.492411 4857 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.499721 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glanceeef9-account-delete-chvxh" podStartSLOduration=4.499702878 podStartE2EDuration="4.499702878s" podCreationTimestamp="2025-11-28 13:44:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:44:10.477242446 +0000 UTC m=+1542.504617613" watchObservedRunningTime="2025-11-28 13:44:10.499702878 +0000 UTC m=+1542.527078045"
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.502623 4857 generic.go:334] "Generic (PLEG): container finished" podID="151aff2f-7aaa-4964-8f75-51c8faf86397" containerID="ec785d0624d75a82e22bd01f7edfc8b3b369f0fa8f2251c36725e8484e0c04f0" exitCode=0
Nov 28 13:44:10 crc kubenswrapper[4857]: E1128 13:44:10.519857 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" containerID="7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 13:44:10 crc kubenswrapper[4857]: E1128 13:44:10.520262 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" containerID="7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 13:44:10 crc kubenswrapper[4857]: E1128 13:44:10.520542 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" containerID="7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 13:44:10 crc kubenswrapper[4857]: E1128 13:44:10.520624 4857 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-ph2cf" podUID="c80a8609-29af-4833-856c-ee4094abcc0c" containerName="ovsdb-server"
Nov 28 13:44:10 crc kubenswrapper[4857]: E1128 13:44:10.528096 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8daa77c6a75ac309fb1164e6118c9ceda262a9d1e1c41ac9229dc01914b7ee0a" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.538410 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-ovsdbserver-nb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-nb-tls-certs") pod "6133e02f-8ece-4b6b-ac4a-c3871e017c1e" (UID: "6133e02f-8ece-4b6b-ac4a-c3871e017c1e"). InnerVolumeSpecName "ovsdbserver-nb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:10 crc kubenswrapper[4857]: E1128 13:44:10.540844 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8daa77c6a75ac309fb1164e6118c9ceda262a9d1e1c41ac9229dc01914b7ee0a" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.543925 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07621208-d831-4470-908c-76084c830753-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "07621208-d831-4470-908c-76084c830753" (UID: "07621208-d831-4470-908c-76084c830753"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:10 crc kubenswrapper[4857]: E1128 13:44:10.544908 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8daa77c6a75ac309fb1164e6118c9ceda262a9d1e1c41ac9229dc01914b7ee0a" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 13:44:10 crc kubenswrapper[4857]: E1128 13:44:10.545089 4857 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-ph2cf" podUID="c80a8609-29af-4833-856c-ee4094abcc0c" containerName="ovs-vswitchd"
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.549330 4857 generic.go:334] "Generic (PLEG): container finished" podID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerID="f4326c863e90d24d084dbd8c33e41f8c3206bed85eb21a3ea86b5f28906b546e" exitCode=0
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.549483 4857 generic.go:334] "Generic (PLEG): container finished" podID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerID="ac8cee2bfbd683e0bf23daa4541d27abead299ecd058365d22a491cf7e370d73" exitCode=0
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.549551 4857 generic.go:334] "Generic (PLEG): container finished" podID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerID="7c06071c7ab94c9d389a4416468a8dae45a8d25bf695e4f1589bbb97f67b9ff8" exitCode=0
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.549610 4857 generic.go:334] "Generic (PLEG): container finished" podID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerID="ca6e4b07ddb5ce36fc245cda1d9e032e507ab8f51871ae61fc6d91bf3d94fbcc" exitCode=0
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.549721 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "8bb8cc13-eda7-4c41-9878-77ddabd55f4b" (UID: "8bb8cc13-eda7-4c41-9878-77ddabd55f4b"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.549828 4857 generic.go:334] "Generic (PLEG): container finished" podID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerID="4d8e6435aaf1596d23240a79db43f33846ba61b7ae4b65eb49e14339421d4856" exitCode=0 Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.549898 4857 generic.go:334] "Generic (PLEG): container finished" podID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerID="ee780d1664f73c1b0efc34f94bcba32ef69c9316883ef0a536cf48cc92544c85" exitCode=0 Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.549975 4857 generic.go:334] "Generic (PLEG): container finished" podID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerID="ce54c7d58ad42d61d735cc7c28384296c4ccdf392def1ceb2994e2fc57811e5e" exitCode=0 Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.550213 4857 generic.go:334] "Generic (PLEG): container finished" podID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerID="c31f95cb1b9a065f105c67a07bd5d5b7cf66901a282cda1c3bec560e21d74414" exitCode=0 Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.550296 4857 generic.go:334] "Generic (PLEG): container finished" podID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerID="43f3af2bcb6a92ec4e0c79358397d8a0e3515b9b8ec39a557f85c39ba849f2e2" exitCode=0 Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.558382 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.559429 4857 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bb8cc13-eda7-4c41-9878-77ddabd55f4b-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.559725 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07621208-d831-4470-908c-76084c830753-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.559757 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-ovsdbserver-nb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.567510 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-config" (OuterVolumeSpecName: "config") pod "0cb677df-7237-4b82-8806-d7abedfad40c" (UID: "0cb677df-7237-4b82-8806-d7abedfad40c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: E1128 13:44:10.569358 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d8bcc929d2d6bab5c64bef5dbfc2e3fbdfbb52f64ab6c01fd92f824932851397" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.571399 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0cb677df-7237-4b82-8806-d7abedfad40c" (UID: "0cb677df-7237-4b82-8806-d7abedfad40c"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.575013 4857 generic.go:334] "Generic (PLEG): container finished" podID="4477c075-9151-49cc-bb52-82dc34ea46ec" containerID="e24143c91b4a17a69c27afa164bb157bee14c4f0597ed2fa5ef6a42ffe793925" exitCode=143 Nov 28 13:44:10 crc kubenswrapper[4857]: E1128 13:44:10.586145 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d8bcc929d2d6bab5c64bef5dbfc2e3fbdfbb52f64ab6c01fd92f824932851397" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.590614 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6133e02f-8ece-4b6b-ac4a-c3871e017c1e" (UID: "6133e02f-8ece-4b6b-ac4a-c3871e017c1e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.597656 4857 generic.go:334] "Generic (PLEG): container finished" podID="0d0c82d5-b320-444c-a4d9-838ca3097157" containerID="d880cc69cc93c55dd123da2ed1ba8cf195b6e491b2fba33f24d18a403279c8c6" exitCode=143 Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.600395 4857 generic.go:334] "Generic (PLEG): container finished" podID="c80a8609-29af-4833-856c-ee4094abcc0c" containerID="7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c" exitCode=0 Nov 28 13:44:10 crc kubenswrapper[4857]: E1128 13:44:10.602143 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d8bcc929d2d6bab5c64bef5dbfc2e3fbdfbb52f64ab6c01fd92f824932851397" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 28 13:44:10 crc kubenswrapper[4857]: E1128 13:44:10.602256 4857 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="076d849e-fd88-4add-a5f9-e45a1983a606" containerName="ovn-northd" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.632353 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.641076 4857 generic.go:334] "Generic (PLEG): container finished" podID="0d2e145c-5068-4dff-a35e-14fe385cdcf2" containerID="89d728886b576deecbaf0ff9d24f62808734b9aa348aaf482ac876e2835345d1" exitCode=0 Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.641108 4857 generic.go:334] "Generic (PLEG): container finished" podID="0d2e145c-5068-4dff-a35e-14fe385cdcf2" containerID="bf2b4d3b1fd6b8f1241149d7a1019420f1580f221c1eb7ad343a94db93716eb4" exitCode=143 Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.641171 4857 util.go:30] "No sandbox for pod can be found. 
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.641246 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0cb677df-7237-4b82-8806-d7abedfad40c" (UID: "0cb677df-7237-4b82-8806-d7abedfad40c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.644955 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "6133e02f-8ece-4b6b-ac4a-c3871e017c1e" (UID: "6133e02f-8ece-4b6b-ac4a-c3871e017c1e"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.645420 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e2fec95b-4e40-4761-9d14-6abfeb78d9c0" (UID: "e2fec95b-4e40-4761-9d14-6abfeb78d9c0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.663196 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "0cb677df-7237-4b82-8806-d7abedfad40c" (UID: "0cb677df-7237-4b82-8806-d7abedfad40c"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.699077 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.699102 4857 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.699112 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.699121 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-config\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.699130 4857 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.699138 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6133e02f-8ece-4b6b-ac4a-c3871e017c1e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.699146 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.699153 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0cb677df-7237-4b82-8806-d7abedfad40c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:10 crc kubenswrapper[4857]: E1128 13:44:10.699220 4857 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found
Nov 28 13:44:10 crc kubenswrapper[4857]: E1128 13:44:10.699267 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-config-data podName:71cc1f00-1a63-428e-8f12-2136ab077860 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:14.69925163 +0000 UTC m=+1546.726626797 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-config-data") pod "rabbitmq-cell1-server-0" (UID: "71cc1f00-1a63-428e-8f12-2136ab077860") : configmap "rabbitmq-cell1-config-data" not found
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.729454 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-ovsdbserver-sb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-sb-tls-certs") pod "e2fec95b-4e40-4761-9d14-6abfeb78d9c0" (UID: "e2fec95b-4e40-4761-9d14-6abfeb78d9c0"). InnerVolumeSpecName "ovsdbserver-sb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.735960 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "e2fec95b-4e40-4761-9d14-6abfeb78d9c0" (UID: "e2fec95b-4e40-4761-9d14-6abfeb78d9c0"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:10 crc kubenswrapper[4857]: E1128 13:44:10.778180 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 015bbd0a1a1e9fb405214fe7a35a6c512629833b2d306bd11d97dfd7b5021dee is running failed: container process not found" containerID="015bbd0a1a1e9fb405214fe7a35a6c512629833b2d306bd11d97dfd7b5021dee" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Nov 28 13:44:10 crc kubenswrapper[4857]: E1128 13:44:10.778789 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 015bbd0a1a1e9fb405214fe7a35a6c512629833b2d306bd11d97dfd7b5021dee is running failed: container process not found" containerID="015bbd0a1a1e9fb405214fe7a35a6c512629833b2d306bd11d97dfd7b5021dee" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Nov 28 13:44:10 crc kubenswrapper[4857]: E1128 13:44:10.778987 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 015bbd0a1a1e9fb405214fe7a35a6c512629833b2d306bd11d97dfd7b5021dee is running failed: container process not found" containerID="015bbd0a1a1e9fb405214fe7a35a6c512629833b2d306bd11d97dfd7b5021dee" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Nov 28 13:44:10 crc kubenswrapper[4857]: E1128 13:44:10.779010 4857 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 015bbd0a1a1e9fb405214fe7a35a6c512629833b2d306bd11d97dfd7b5021dee is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-t99ql" podUID="b1f7e362-6e6b-4636-b551-4533ad037811" containerName="ovn-controller" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.800709 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-ovsdbserver-sb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.800831 4857 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2fec95b-4e40-4761-9d14-6abfeb78d9c0-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.913740 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-k7b77" event={"ID":"8bb8cc13-eda7-4c41-9878-77ddabd55f4b","Type":"ContainerDied","Data":"ca0c60f161c0f6c7d1f1e262afdaa8650aa63163101e99fdf662fb1788ac9c9b"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.913829 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"64da16e3-099d-4def-9656-91f40d64672f","Type":"ContainerDied","Data":"ee074130ed95276ff4a950681c7df3344a6e4c3aa86435eb4b8f9e471126f272"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.913872 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-856655ccc5-9fgqc"] Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.913900 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286","Type":"ContainerDied","Data":"e575a5748441c404e5228a5c2146f98ab1fd6c5ae67eb9523fedd879f306a6a7"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.913941 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"a53cec78-89c3-4495-8af6-4caf4f018cc1","Type":"ContainerDied","Data":"5ac6adaa76a02bc0a74df277af75098ac24f5239a90ad5f23966871efb74d2a3"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.913957 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"a53cec78-89c3-4495-8af6-4caf4f018cc1","Type":"ContainerDied","Data":"2b1f1cfc83df026dae7bf7bf7c447aef3e892986be530a90b20b933b8fe1c77c"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.913969 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-56664b65dc-mkdgh" event={"ID":"f411fba7-d7b2-4d97-9388-c1b6f57e8328","Type":"ContainerDied","Data":"df7bca3ad7fcc3cc2ac1df9b77614d94b72b4db2a23294091359d1e948b3577e"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.913983 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-56664b65dc-mkdgh" event={"ID":"f411fba7-d7b2-4d97-9388-c1b6f57e8328","Type":"ContainerDied","Data":"8b3ff8b7cb9bbbd5d33a06e4dc7773db3800416089a3b589b96e2930ebcb5b38"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914024 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b6a593cc-74b3-4a02-ba7a-f4c5d7400476","Type":"ContainerDied","Data":"a840981835cecbc52064fb805056a67f1d699c2bf561f689da71d182309a6ea3"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914047 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-h799k" event={"ID":"0cb677df-7237-4b82-8806-d7abedfad40c","Type":"ContainerDied","Data":"a8612ed633c49d689316f547b420399bf07d8c73db8c5afb78385d362beaca55"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914061 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"6133e02f-8ece-4b6b-ac4a-c3871e017c1e","Type":"ContainerDied","Data":"69d3044fe2e1fa6c916ac564e064f861f8402dc3a6ca1d761f53522ebfd67093"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914099 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glanceeef9-account-delete-chvxh" event={"ID":"bf861df0-ad6e-4a39-9932-395afa59e76d","Type":"ContainerStarted","Data":"12e15364826b54c9d3daec1b157d14810f19e37315fa2290659bf5d554e0e354"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914118 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glanceeef9-account-delete-chvxh" event={"ID":"bf861df0-ad6e-4a39-9932-395afa59e76d","Type":"ContainerStarted","Data":"7abe9a1c5042790105001c559890bd4e71df61c138912ae23e81360e2ec7ce21"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914133 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-controller-t99ql" event={"ID":"b1f7e362-6e6b-4636-b551-4533ad037811","Type":"ContainerDied","Data":"caad3084e358c3068d0b0f33e081298a4dc76f14aee40ff63720ff962289097d"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914149 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="caad3084e358c3068d0b0f33e081298a4dc76f14aee40ff63720ff962289097d" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914189 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"e2fec95b-4e40-4761-9d14-6abfeb78d9c0","Type":"ContainerDied","Data":"bc6f23278057204397c35369662b163cc069bd8443a701043e5b2fc7356a153a"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914209 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"e2fec95b-4e40-4761-9d14-6abfeb78d9c0","Type":"ContainerDied","Data":"457de2cc0d2a22e0e6bf0896f8d33c2d72e60d0f25a91797147a73b2413af013"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914223 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7d4894d65-gqnvs" event={"ID":"151aff2f-7aaa-4964-8f75-51c8faf86397","Type":"ContainerDied","Data":"ec785d0624d75a82e22bd01f7edfc8b3b369f0fa8f2251c36725e8484e0c04f0"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914238 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerDied","Data":"f4326c863e90d24d084dbd8c33e41f8c3206bed85eb21a3ea86b5f28906b546e"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914285 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerDied","Data":"ac8cee2bfbd683e0bf23daa4541d27abead299ecd058365d22a491cf7e370d73"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914333 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerDied","Data":"7c06071c7ab94c9d389a4416468a8dae45a8d25bf695e4f1589bbb97f67b9ff8"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914354 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerDied","Data":"ca6e4b07ddb5ce36fc245cda1d9e032e507ab8f51871ae61fc6d91bf3d94fbcc"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914368 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerDied","Data":"4d8e6435aaf1596d23240a79db43f33846ba61b7ae4b65eb49e14339421d4856"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914404 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerDied","Data":"ee780d1664f73c1b0efc34f94bcba32ef69c9316883ef0a536cf48cc92544c85"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914423 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerDied","Data":"ce54c7d58ad42d61d735cc7c28384296c4ccdf392def1ceb2994e2fc57811e5e"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914438 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerDied","Data":"c31f95cb1b9a065f105c67a07bd5d5b7cf66901a282cda1c3bec560e21d74414"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914452 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerDied","Data":"43f3af2bcb6a92ec4e0c79358397d8a0e3515b9b8ec39a557f85c39ba849f2e2"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914524 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron5867-account-delete-58twd" event={"ID":"89adcb9a-b993-4e60-ae3b-413bed35ae0d","Type":"ContainerStarted","Data":"61e85e27a22224665fbae14e722d6be8aeebc908a627e8c7f4e2f1f22deb8c46"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914545 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron5867-account-delete-58twd" event={"ID":"89adcb9a-b993-4e60-ae3b-413bed35ae0d","Type":"ContainerStarted","Data":"9274a232b3ca2ca988b7f756090b8c2800a83d2a5c93887b4ad8ab45f68ebed2"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914561 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4477c075-9151-49cc-bb52-82dc34ea46ec","Type":"ContainerDied","Data":"e24143c91b4a17a69c27afa164bb157bee14c4f0597ed2fa5ef6a42ffe793925"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914578 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placementcdcd-account-delete-h5qc4" event={"ID":"e1ac27d1-5ad2-40ed-af2b-18668e48ead3","Type":"ContainerStarted","Data":"0bc4ed5492f1aa664fc5c8617d9474c0b50cc73e3dee3adca82c42e83d55f771"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914593 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placementcdcd-account-delete-h5qc4" event={"ID":"e1ac27d1-5ad2-40ed-af2b-18668e48ead3","Type":"ContainerStarted","Data":"289b2e704693fc06f78e08b3a90fd18a768688096c5ca173277c09d24138648e"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914637 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-575548d9c6-4zx6z" event={"ID":"0d0c82d5-b320-444c-a4d9-838ca3097157","Type":"ContainerDied","Data":"d880cc69cc93c55dd123da2ed1ba8cf195b6e491b2fba33f24d18a403279c8c6"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914655 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-ph2cf" event={"ID":"c80a8609-29af-4833-856c-ee4094abcc0c","Type":"ContainerDied","Data":"7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914671 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" event={"ID":"0d2e145c-5068-4dff-a35e-14fe385cdcf2","Type":"ContainerDied","Data":"89d728886b576deecbaf0ff9d24f62808734b9aa348aaf482ac876e2835345d1"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.914687 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" event={"ID":"0d2e145c-5068-4dff-a35e-14fe385cdcf2","Type":"ContainerDied","Data":"bf2b4d3b1fd6b8f1241149d7a1019420f1580f221c1eb7ad343a94db93716eb4"} Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.918877 4857 scope.go:117] "RemoveContainer" containerID="c9dfde4fc40c233928885d1fb977721a4b1687b44081115fcf5a00db9ce6907f" Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.919167 4857 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-856655ccc5-9fgqc" podUID="7358aa80-dbe4-4a31-ad84-9dc125491046" containerName="proxy-httpd" containerID="cri-o://921da2286c74b9205a4963fadea18299c07583052be029c357bcd68f1c378c4d" gracePeriod=30 Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.919601 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-856655ccc5-9fgqc" podUID="7358aa80-dbe4-4a31-ad84-9dc125491046" containerName="proxy-server" containerID="cri-o://7f1eabd058b1d022ba7f7cbccd8b90653ba66843bcbc94ab126462d61013e688" gracePeriod=30 Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:10.940528 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican7dd8-account-delete-jg2j5"] Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:10.954897 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-t99ql" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.004601 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b1f7e362-6e6b-4636-b551-4533ad037811-var-run\") pod \"b1f7e362-6e6b-4636-b551-4533ad037811\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.004676 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b1f7e362-6e6b-4636-b551-4533ad037811-var-run-ovn\") pod \"b1f7e362-6e6b-4636-b551-4533ad037811\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.004797 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b1f7e362-6e6b-4636-b551-4533ad037811-var-run" (OuterVolumeSpecName: "var-run") pod "b1f7e362-6e6b-4636-b551-4533ad037811" (UID: "b1f7e362-6e6b-4636-b551-4533ad037811"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.004850 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b1f7e362-6e6b-4636-b551-4533ad037811-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "b1f7e362-6e6b-4636-b551-4533ad037811" (UID: "b1f7e362-6e6b-4636-b551-4533ad037811"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.005180 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1f7e362-6e6b-4636-b551-4533ad037811-ovn-controller-tls-certs\") pod \"b1f7e362-6e6b-4636-b551-4533ad037811\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.005272 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q2l8j\" (UniqueName: \"kubernetes.io/projected/b1f7e362-6e6b-4636-b551-4533ad037811-kube-api-access-q2l8j\") pod \"b1f7e362-6e6b-4636-b551-4533ad037811\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.005349 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b1f7e362-6e6b-4636-b551-4533ad037811-scripts\") pod \"b1f7e362-6e6b-4636-b551-4533ad037811\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.005486 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b1f7e362-6e6b-4636-b551-4533ad037811-var-log-ovn\") pod \"b1f7e362-6e6b-4636-b551-4533ad037811\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.005513 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1f7e362-6e6b-4636-b551-4533ad037811-combined-ca-bundle\") pod \"b1f7e362-6e6b-4636-b551-4533ad037811\" (UID: \"b1f7e362-6e6b-4636-b551-4533ad037811\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.006123 4857 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b1f7e362-6e6b-4636-b551-4533ad037811-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.006138 4857 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b1f7e362-6e6b-4636-b551-4533ad037811-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.007878 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b1f7e362-6e6b-4636-b551-4533ad037811-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "b1f7e362-6e6b-4636-b551-4533ad037811" (UID: "b1f7e362-6e6b-4636-b551-4533ad037811"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.008264 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1f7e362-6e6b-4636-b551-4533ad037811-scripts" (OuterVolumeSpecName: "scripts") pod "b1f7e362-6e6b-4636-b551-4533ad037811" (UID: "b1f7e362-6e6b-4636-b551-4533ad037811"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.048980 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1f7e362-6e6b-4636-b551-4533ad037811-kube-api-access-q2l8j" (OuterVolumeSpecName: "kube-api-access-q2l8j") pod "b1f7e362-6e6b-4636-b551-4533ad037811" (UID: "b1f7e362-6e6b-4636-b551-4533ad037811"). InnerVolumeSpecName "kube-api-access-q2l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.082345 4857 scope.go:117] "RemoveContainer" containerID="54ebf8119e8b85e98e03f36c99c69b277451e9d96ff0dda0b092e460eb535292" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.082393 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1f7e362-6e6b-4636-b551-4533ad037811-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b1f7e362-6e6b-4636-b551-4533ad037811" (UID: "b1f7e362-6e6b-4636-b551-4533ad037811"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.100117 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.107731 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q2l8j\" (UniqueName: \"kubernetes.io/projected/b1f7e362-6e6b-4636-b551-4533ad037811-kube-api-access-q2l8j\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.108052 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b1f7e362-6e6b-4636-b551-4533ad037811-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.108210 4857 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b1f7e362-6e6b-4636-b551-4533ad037811-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.108289 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1f7e362-6e6b-4636-b551-4533ad037811-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.109486 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinderd8b3-account-delete-lxwj8"] Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.187251 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.203830 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novaapi7cc9-account-delete-qjqg5"] Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.211040 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d2e145c-5068-4dff-a35e-14fe385cdcf2-logs\") pod \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\" (UID: \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.212361 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d2e145c-5068-4dff-a35e-14fe385cdcf2-config-data-custom\") pod \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\" 
(UID: \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.212470 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v64tr\" (UniqueName: \"kubernetes.io/projected/0d2e145c-5068-4dff-a35e-14fe385cdcf2-kube-api-access-v64tr\") pod \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\" (UID: \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.212555 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d2e145c-5068-4dff-a35e-14fe385cdcf2-combined-ca-bundle\") pod \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\" (UID: \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.212672 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d2e145c-5068-4dff-a35e-14fe385cdcf2-config-data\") pod \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\" (UID: \"0d2e145c-5068-4dff-a35e-14fe385cdcf2\") " Nov 28 13:44:11 crc kubenswrapper[4857]: E1128 13:44:11.214175 4857 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 13:44:11 crc kubenswrapper[4857]: E1128 13:44:11.214347 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-config-data podName:cfbd0457-d459-4bf2-bdaf-8b61db5cce65 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:15.214323403 +0000 UTC m=+1547.241698570 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-config-data") pod "rabbitmq-server-0" (UID: "cfbd0457-d459-4bf2-bdaf-8b61db5cce65") : configmap "rabbitmq-config-data" not found Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.215141 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d2e145c-5068-4dff-a35e-14fe385cdcf2-logs" (OuterVolumeSpecName: "logs") pod "0d2e145c-5068-4dff-a35e-14fe385cdcf2" (UID: "0d2e145c-5068-4dff-a35e-14fe385cdcf2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.219901 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.225057 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d2e145c-5068-4dff-a35e-14fe385cdcf2-kube-api-access-v64tr" (OuterVolumeSpecName: "kube-api-access-v64tr") pod "0d2e145c-5068-4dff-a35e-14fe385cdcf2" (UID: "0d2e145c-5068-4dff-a35e-14fe385cdcf2"). InnerVolumeSpecName "kube-api-access-v64tr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.233115 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d2e145c-5068-4dff-a35e-14fe385cdcf2-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0d2e145c-5068-4dff-a35e-14fe385cdcf2" (UID: "0d2e145c-5068-4dff-a35e-14fe385cdcf2"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.234815 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell032fc-account-delete-xk7xx"] Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.235254 4857 scope.go:117] "RemoveContainer" containerID="54250c0eff6684e824788b7e12d3730da9501d1501683c7fd89bd82f1a19cc5d" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.246733 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-h799k"] Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.255711 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-h799k"] Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.263703 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d2e145c-5068-4dff-a35e-14fe385cdcf2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0d2e145c-5068-4dff-a35e-14fe385cdcf2" (UID: "0d2e145c-5068-4dff-a35e-14fe385cdcf2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.266897 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.277943 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.287362 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1f7e362-6e6b-4636-b551-4533ad037811-ovn-controller-tls-certs" (OuterVolumeSpecName: "ovn-controller-tls-certs") pod "b1f7e362-6e6b-4636-b551-4533ad037811" (UID: "b1f7e362-6e6b-4636-b551-4533ad037811"). InnerVolumeSpecName "ovn-controller-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.295529 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-k7b77"] Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.299055 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d2e145c-5068-4dff-a35e-14fe385cdcf2-config-data" (OuterVolumeSpecName: "config-data") pod "0d2e145c-5068-4dff-a35e-14fe385cdcf2" (UID: "0d2e145c-5068-4dff-a35e-14fe385cdcf2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.308807 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-metrics-k7b77"] Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.315157 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d2e145c-5068-4dff-a35e-14fe385cdcf2-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.315195 4857 reconciler_common.go:293] "Volume detached for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1f7e362-6e6b-4636-b551-4533ad037811-ovn-controller-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.315206 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d2e145c-5068-4dff-a35e-14fe385cdcf2-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.315217 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d2e145c-5068-4dff-a35e-14fe385cdcf2-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.315225 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v64tr\" (UniqueName: \"kubernetes.io/projected/0d2e145c-5068-4dff-a35e-14fe385cdcf2-kube-api-access-v64tr\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.315234 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d2e145c-5068-4dff-a35e-14fe385cdcf2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.402507 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell10c56-account-delete-6l7tk" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.409179 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-856655ccc5-9fgqc" podUID="7358aa80-dbe4-4a31-ad84-9dc125491046" containerName="proxy-server" probeResult="failure" output="Get \"https://10.217.0.167:8080/healthcheck\": dial tcp 10.217.0.167:8080: connect: connection refused" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.409899 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-856655ccc5-9fgqc" podUID="7358aa80-dbe4-4a31-ad84-9dc125491046" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.167:8080/healthcheck\": dial tcp 10.217.0.167:8080: connect: connection refused" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.436477 4857 scope.go:117] "RemoveContainer" containerID="1412a3b0dbe8994e239691c6f96324d04036bd95c69256db95ab0b15e2c14255" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.555697 4857 scope.go:117] "RemoveContainer" containerID="7f7bc0064f521471cf62c8788f876e9c2ad9aae9c8e92b24025dd3c24bcd9aaf" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.584186 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:44:11 crc kubenswrapper[4857]: E1128 13:44:11.600103 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="165d4ed1ed3f722b3e2c4f5769e7c056c9eafbeeed14d27d5233ab4f09a4ef4d" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 13:44:11 crc kubenswrapper[4857]: E1128 13:44:11.601636 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="165d4ed1ed3f722b3e2c4f5769e7c056c9eafbeeed14d27d5233ab4f09a4ef4d" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 13:44:11 crc kubenswrapper[4857]: E1128 13:44:11.610848 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="165d4ed1ed3f722b3e2c4f5769e7c056c9eafbeeed14d27d5233ab4f09a4ef4d" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 13:44:11 crc kubenswrapper[4857]: E1128 13:44:11.610914 4857 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd" containerName="nova-cell0-conductor-conductor" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.617194 4857 scope.go:117] "RemoveContainer" containerID="8dea0ac70575cce5895b3d523d663e080996fff826c13ed3d4f241bb7bf27649" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.673451 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-56664b65dc-mkdgh" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.703214 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b6a593cc-74b3-4a02-ba7a-f4c5d7400476","Type":"ContainerDied","Data":"cf59d27af77803d2eb7fcf1aca8c75a080389519bdbf5d6439918de1ac607955"} Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.703518 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.704468 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.709843 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.710157 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cc11fd89-0365-46e5-b8b1-48f933611ab9" containerName="ceilometer-central-agent" containerID="cri-o://3e988a9d71b894b528cad9cf749fa687e397d909171f243b57b66253d5c4fcf4" gracePeriod=30 Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.710311 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cc11fd89-0365-46e5-b8b1-48f933611ab9" containerName="proxy-httpd" containerID="cri-o://758611f27d908e2a9d4f2cb15d9c474f4f04bb2c788bba7c25fe962588bee8ea" gracePeriod=30 Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.710351 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cc11fd89-0365-46e5-b8b1-48f933611ab9" containerName="sg-core" containerID="cri-o://53c5151a4983e3c03ad2115ba0190cda5364aa8956976486e4a3dda5c19894bc" gracePeriod=30 Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.710385 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cc11fd89-0365-46e5-b8b1-48f933611ab9" containerName="ceilometer-notification-agent" containerID="cri-o://bcfc962c58adc4335b9f934de14d9b2330cea19877b855df32060d57a9431c59" gracePeriod=30 Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.721318 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f411fba7-d7b2-4d97-9388-c1b6f57e8328-config-data-custom\") pod \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\" (UID: \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.721378 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f411fba7-d7b2-4d97-9388-c1b6f57e8328-config-data\") pod \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\" (UID: \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.721445 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f411fba7-d7b2-4d97-9388-c1b6f57e8328-logs\") pod \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\" (UID: \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.721486 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nbpbt\" (UniqueName: \"kubernetes.io/projected/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-kube-api-access-nbpbt\") pod \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\" (UID: \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.721532 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-config-data\") pod \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\" (UID: \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.721558 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-combined-ca-bundle\") pod \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\" (UID: \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.721576 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-vencrypt-tls-certs\") pod \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\" (UID: \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.721599 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t66xl\" (UniqueName: \"kubernetes.io/projected/f411fba7-d7b2-4d97-9388-c1b6f57e8328-kube-api-access-t66xl\") pod \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\" (UID: \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.721650 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-nova-novncproxy-tls-certs\") pod \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\" (UID: \"b6a593cc-74b3-4a02-ba7a-f4c5d7400476\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.721722 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f411fba7-d7b2-4d97-9388-c1b6f57e8328-combined-ca-bundle\") pod \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\" (UID: \"f411fba7-d7b2-4d97-9388-c1b6f57e8328\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.739353 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f411fba7-d7b2-4d97-9388-c1b6f57e8328-logs" (OuterVolumeSpecName: "logs") pod "f411fba7-d7b2-4d97-9388-c1b6f57e8328" (UID: "f411fba7-d7b2-4d97-9388-c1b6f57e8328"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.752361 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f411fba7-d7b2-4d97-9388-c1b6f57e8328-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f411fba7-d7b2-4d97-9388-c1b6f57e8328" (UID: "f411fba7-d7b2-4d97-9388-c1b6f57e8328"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.752771 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.764916 4857 generic.go:334] "Generic (PLEG): container finished" podID="41687469-06d7-47ab-ad25-d32df165e1e2" containerID="9edd8ca732119343257da06c9d3c8090ac7032d415e0af5cc821df9c9c20bf76" exitCode=0 Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.764947 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"41687469-06d7-47ab-ad25-d32df165e1e2","Type":"ContainerDied","Data":"9edd8ca732119343257da06c9d3c8090ac7032d415e0af5cc821df9c9c20bf76"} Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.765010 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"41687469-06d7-47ab-ad25-d32df165e1e2","Type":"ContainerDied","Data":"273deb67415158afe19f6d7a4cc4025be96b58873f3ec2c41104f83ef6c13125"} Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.766147 4857 scope.go:117] "RemoveContainer" containerID="bc6f23278057204397c35369662b163cc069bd8443a701043e5b2fc7356a153a" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.773952 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-kube-api-access-nbpbt" (OuterVolumeSpecName: "kube-api-access-nbpbt") pod "b6a593cc-74b3-4a02-ba7a-f4c5d7400476" (UID: "b6a593cc-74b3-4a02-ba7a-f4c5d7400476"). InnerVolumeSpecName "kube-api-access-nbpbt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.774840 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.775167 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="1fa6d725-8054-46f1-8c0c-c693d5306563" containerName="kube-state-metrics" containerID="cri-o://5293dd02b3d8cbb50029798677d596c61dae0e02fb1b0ef17359254ce5d584b6" gracePeriod=30 Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.789408 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f411fba7-d7b2-4d97-9388-c1b6f57e8328-kube-api-access-t66xl" (OuterVolumeSpecName: "kube-api-access-t66xl") pod "f411fba7-d7b2-4d97-9388-c1b6f57e8328" (UID: "f411fba7-d7b2-4d97-9388-c1b6f57e8328"). InnerVolumeSpecName "kube-api-access-t66xl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.789816 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell032fc-account-delete-xk7xx" event={"ID":"ad7bc32b-e1f1-4ce5-a094-56f37d676131","Type":"ContainerStarted","Data":"bbafd5b524434b94d1c4d5221aacb50eb0ef8348685cfe759c6ef6713ce991f4"} Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.805257 4857 generic.go:334] "Generic (PLEG): container finished" podID="7358aa80-dbe4-4a31-ad84-9dc125491046" containerID="7f1eabd058b1d022ba7f7cbccd8b90653ba66843bcbc94ab126462d61013e688" exitCode=0 Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.805293 4857 generic.go:334] "Generic (PLEG): container finished" podID="7358aa80-dbe4-4a31-ad84-9dc125491046" containerID="921da2286c74b9205a4963fadea18299c07583052be029c357bcd68f1c378c4d" exitCode=0 Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.805338 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-856655ccc5-9fgqc" event={"ID":"7358aa80-dbe4-4a31-ad84-9dc125491046","Type":"ContainerDied","Data":"7f1eabd058b1d022ba7f7cbccd8b90653ba66843bcbc94ab126462d61013e688"} Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.805365 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-856655ccc5-9fgqc" event={"ID":"7358aa80-dbe4-4a31-ad84-9dc125491046","Type":"ContainerDied","Data":"921da2286c74b9205a4963fadea18299c07583052be029c357bcd68f1c378c4d"} Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.824280 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41687469-06d7-47ab-ad25-d32df165e1e2-combined-ca-bundle\") pod \"41687469-06d7-47ab-ad25-d32df165e1e2\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.824328 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"41687469-06d7-47ab-ad25-d32df165e1e2\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.824366 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/41687469-06d7-47ab-ad25-d32df165e1e2-config-data-default\") pod \"41687469-06d7-47ab-ad25-d32df165e1e2\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.824406 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a53cec78-89c3-4495-8af6-4caf4f018cc1-etc-machine-id\") pod \"a53cec78-89c3-4495-8af6-4caf4f018cc1\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.824432 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-config-data-custom\") pod \"a53cec78-89c3-4495-8af6-4caf4f018cc1\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.824456 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/41687469-06d7-47ab-ad25-d32df165e1e2-config-data-generated\") pod 
\"41687469-06d7-47ab-ad25-d32df165e1e2\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.824496 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-config-data\") pod \"a53cec78-89c3-4495-8af6-4caf4f018cc1\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.824523 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/41687469-06d7-47ab-ad25-d32df165e1e2-kolla-config\") pod \"41687469-06d7-47ab-ad25-d32df165e1e2\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.824615 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-combined-ca-bundle\") pod \"a53cec78-89c3-4495-8af6-4caf4f018cc1\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.824617 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a53cec78-89c3-4495-8af6-4caf4f018cc1-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "a53cec78-89c3-4495-8af6-4caf4f018cc1" (UID: "a53cec78-89c3-4495-8af6-4caf4f018cc1"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.824694 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-99tbc\" (UniqueName: \"kubernetes.io/projected/41687469-06d7-47ab-ad25-d32df165e1e2-kube-api-access-99tbc\") pod \"41687469-06d7-47ab-ad25-d32df165e1e2\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.824735 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-scripts\") pod \"a53cec78-89c3-4495-8af6-4caf4f018cc1\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.824799 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41687469-06d7-47ab-ad25-d32df165e1e2-operator-scripts\") pod \"41687469-06d7-47ab-ad25-d32df165e1e2\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.824827 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h72g5\" (UniqueName: \"kubernetes.io/projected/a53cec78-89c3-4495-8af6-4caf4f018cc1-kube-api-access-h72g5\") pod \"a53cec78-89c3-4495-8af6-4caf4f018cc1\" (UID: \"a53cec78-89c3-4495-8af6-4caf4f018cc1\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.824845 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/41687469-06d7-47ab-ad25-d32df165e1e2-galera-tls-certs\") pod \"41687469-06d7-47ab-ad25-d32df165e1e2\" (UID: \"41687469-06d7-47ab-ad25-d32df165e1e2\") " Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.825204 4857 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/a53cec78-89c3-4495-8af6-4caf4f018cc1-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.825215 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f411fba7-d7b2-4d97-9388-c1b6f57e8328-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.825224 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nbpbt\" (UniqueName: \"kubernetes.io/projected/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-kube-api-access-nbpbt\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.825236 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t66xl\" (UniqueName: \"kubernetes.io/projected/f411fba7-d7b2-4d97-9388-c1b6f57e8328-kube-api-access-t66xl\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.825246 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f411fba7-d7b2-4d97-9388-c1b6f57e8328-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.827655 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41687469-06d7-47ab-ad25-d32df165e1e2-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "41687469-06d7-47ab-ad25-d32df165e1e2" (UID: "41687469-06d7-47ab-ad25-d32df165e1e2"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.827693 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41687469-06d7-47ab-ad25-d32df165e1e2-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "41687469-06d7-47ab-ad25-d32df165e1e2" (UID: "41687469-06d7-47ab-ad25-d32df165e1e2"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.829948 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41687469-06d7-47ab-ad25-d32df165e1e2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "41687469-06d7-47ab-ad25-d32df165e1e2" (UID: "41687469-06d7-47ab-ad25-d32df165e1e2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.830500 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41687469-06d7-47ab-ad25-d32df165e1e2-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "41687469-06d7-47ab-ad25-d32df165e1e2" (UID: "41687469-06d7-47ab-ad25-d32df165e1e2"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.832571 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.840080 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-9689bdb94-frvhg" event={"ID":"0d2e145c-5068-4dff-a35e-14fe385cdcf2","Type":"ContainerDied","Data":"66bd52cc70500c43fcbe9f5c5ac33ce420cc24212dd17f80d02d735ec7519d64"} Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.871879 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="cfbd0457-d459-4bf2-bdaf-8b61db5cce65" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.101:5671: connect: connection refused" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.881547 4857 generic.go:334] "Generic (PLEG): container finished" podID="bf861df0-ad6e-4a39-9932-395afa59e76d" containerID="12e15364826b54c9d3daec1b157d14810f19e37315fa2290659bf5d554e0e354" exitCode=0 Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.881637 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glanceeef9-account-delete-chvxh" event={"ID":"bf861df0-ad6e-4a39-9932-395afa59e76d","Type":"ContainerDied","Data":"12e15364826b54c9d3daec1b157d14810f19e37315fa2290659bf5d554e0e354"} Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.905037 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.905249 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/memcached-0" podUID="30a2b522-ef43-4b0a-8215-2bb928744e00" containerName="memcached" containerID="cri-o://32156962d3c5fd3e7bbc12ce4bc19050625834d7bd9f60bcb681cfb5610ca641" gracePeriod=30 Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.928028 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41687469-06d7-47ab-ad25-d32df165e1e2-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.928048 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/41687469-06d7-47ab-ad25-d32df165e1e2-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.928057 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/41687469-06d7-47ab-ad25-d32df165e1e2-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.928066 4857 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/41687469-06d7-47ab-ad25-d32df165e1e2-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.929858 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-g2shl"] Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.933548 4857 generic.go:334] "Generic (PLEG): container finished" podID="89adcb9a-b993-4e60-ae3b-413bed35ae0d" containerID="61e85e27a22224665fbae14e722d6be8aeebc908a627e8c7f4e2f1f22deb8c46" exitCode=0 Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.933628 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron5867-account-delete-58twd" 
event={"ID":"89adcb9a-b993-4e60-ae3b-413bed35ae0d","Type":"ContainerDied","Data":"61e85e27a22224665fbae14e722d6be8aeebc908a627e8c7f4e2f1f22deb8c46"} Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.939525 4857 scope.go:117] "RemoveContainer" containerID="8dea0ac70575cce5895b3d523d663e080996fff826c13ed3d4f241bb7bf27649" Nov 28 13:44:11 crc kubenswrapper[4857]: E1128 13:44:11.943397 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8dea0ac70575cce5895b3d523d663e080996fff826c13ed3d4f241bb7bf27649\": container with ID starting with 8dea0ac70575cce5895b3d523d663e080996fff826c13ed3d4f241bb7bf27649 not found: ID does not exist" containerID="8dea0ac70575cce5895b3d523d663e080996fff826c13ed3d4f241bb7bf27649" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.943444 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8dea0ac70575cce5895b3d523d663e080996fff826c13ed3d4f241bb7bf27649"} err="failed to get container status \"8dea0ac70575cce5895b3d523d663e080996fff826c13ed3d4f241bb7bf27649\": rpc error: code = NotFound desc = could not find container \"8dea0ac70575cce5895b3d523d663e080996fff826c13ed3d4f241bb7bf27649\": container with ID starting with 8dea0ac70575cce5895b3d523d663e080996fff826c13ed3d4f241bb7bf27649 not found: ID does not exist" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.943472 4857 scope.go:117] "RemoveContainer" containerID="bc6f23278057204397c35369662b163cc069bd8443a701043e5b2fc7356a153a" Nov 28 13:44:11 crc kubenswrapper[4857]: E1128 13:44:11.944831 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc6f23278057204397c35369662b163cc069bd8443a701043e5b2fc7356a153a\": container with ID starting with bc6f23278057204397c35369662b163cc069bd8443a701043e5b2fc7356a153a not found: ID does not exist" containerID="bc6f23278057204397c35369662b163cc069bd8443a701043e5b2fc7356a153a" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.944861 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc6f23278057204397c35369662b163cc069bd8443a701043e5b2fc7356a153a"} err="failed to get container status \"bc6f23278057204397c35369662b163cc069bd8443a701043e5b2fc7356a153a\": rpc error: code = NotFound desc = could not find container \"bc6f23278057204397c35369662b163cc069bd8443a701043e5b2fc7356a153a\": container with ID starting with bc6f23278057204397c35369662b163cc069bd8443a701043e5b2fc7356a153a not found: ID does not exist" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.944882 4857 scope.go:117] "RemoveContainer" containerID="f53ded071ea01c8120bc4be89662f272bd544a870cac413afe982b426f8f618a" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.952251 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-56664b65dc-mkdgh" event={"ID":"f411fba7-d7b2-4d97-9388-c1b6f57e8328","Type":"ContainerDied","Data":"19d4a2ab4802b975ae7e0927266d7ad3cf0970d05ff5693a288b543337eaba1d"} Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.952455 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-56664b65dc-mkdgh" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.958301 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-cwhln"] Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.964113 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinderd8b3-account-delete-lxwj8" event={"ID":"24a3dca4-a3d0-479d-9be8-fb8c16f97a77","Type":"ContainerStarted","Data":"017d43f882bf4a5325fc042ae896444e41b03857edf394e39ab7e177cc19b3bf"} Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.972622 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican7dd8-account-delete-jg2j5" event={"ID":"d8c0e041-9c74-4a06-a966-833e919e745a","Type":"ContainerStarted","Data":"85cc8f8947446485c19e1a73e1557ca7c966bab25b33207b2360b87918d2a63c"} Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.974734 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="7bee7127-9367-4882-8ab1-0493128d2641" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.175:9292/healthcheck\": read tcp 10.217.0.2:34632->10.217.0.175:9292: read: connection reset by peer" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.974741 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="7bee7127-9367-4882-8ab1-0493128d2641" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.175:9292/healthcheck\": read tcp 10.217.0.2:34624->10.217.0.175:9292: read: connection reset by peer" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.978982 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"a53cec78-89c3-4495-8af6-4caf4f018cc1","Type":"ContainerDied","Data":"eae65d10eff323342c881919bd6d872994f4a98e32c7f45bf6b6f9513af9dc8e"} Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.980512 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.996990 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-g2shl"] Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.000082 4857 generic.go:334] "Generic (PLEG): container finished" podID="e1ac27d1-5ad2-40ed-af2b-18668e48ead3" containerID="0bc4ed5492f1aa664fc5c8617d9474c0b50cc73e3dee3adca82c42e83d55f771" exitCode=0 Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.000177 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placementcdcd-account-delete-h5qc4" event={"ID":"e1ac27d1-5ad2-40ed-af2b-18668e48ead3","Type":"ContainerDied","Data":"0bc4ed5492f1aa664fc5c8617d9474c0b50cc73e3dee3adca82c42e83d55f771"} Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.016565 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8b4e711ca231fe7452ef2052a4b12b122b8a749ee4670a4b691165e5f548e102" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.020056 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8b4e711ca231fe7452ef2052a4b12b122b8a749ee4670a4b691165e5f548e102" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.021648 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi7cc9-account-delete-qjqg5" event={"ID":"6f75b361-6a38-42a4-971c-1b3a68a3f10f","Type":"ContainerStarted","Data":"6490c90ff4345c57ffecc838b7daecc71bba27fe8678efb3148de71bd883910c"} Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.023601 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell10c56-account-delete-6l7tk" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.028110 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-cwhln"] Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.028921 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-t99ql" Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.035561 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8b4e711ca231fe7452ef2052a4b12b122b8a749ee4670a4b691165e5f548e102" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.035624 4857 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="310b8699-5d0c-4cce-b8fd-90ccedc2ce85" containerName="nova-scheduler-scheduler" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.068296 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a53cec78-89c3-4495-8af6-4caf4f018cc1-kube-api-access-h72g5" (OuterVolumeSpecName: "kube-api-access-h72g5") pod "a53cec78-89c3-4495-8af6-4caf4f018cc1" (UID: "a53cec78-89c3-4495-8af6-4caf4f018cc1"). InnerVolumeSpecName "kube-api-access-h72g5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.073467 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-operator-scripts\") pod \"novacell10c56-account-delete-6l7tk\" (UID: \"a81fb5f5-33d2-4da6-86a6-d2f248a3364f\") " pod="openstack/novacell10c56-account-delete-6l7tk" Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.073631 4857 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.073703 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-operator-scripts podName:a81fb5f5-33d2-4da6-86a6-d2f248a3364f nodeName:}" failed. No retries permitted until 2025-11-28 13:44:16.073682441 +0000 UTC m=+1548.101057598 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-operator-scripts") pod "novacell10c56-account-delete-6l7tk" (UID: "a81fb5f5-33d2-4da6-86a6-d2f248a3364f") : configmap "openstack-cell1-scripts" not found Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.073890 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h72g5\" (UniqueName: \"kubernetes.io/projected/a53cec78-89c3-4495-8af6-4caf4f018cc1-kube-api-access-h72g5\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.100308 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-5f4cb87f5f-m76pk"] Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.100711 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/keystone-5f4cb87f5f-m76pk" podUID="adfd05de-d1db-45d3-aea1-b35dc0110b71" containerName="keystone-api" containerID="cri-o://c6ba92e4d979c8b69f5fe686fd993ea41a62407d93386f6c437c35d1fe1b2018" gracePeriod=30 Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.123115 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone0c6f-account-delete-bbxfz"] Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.124033 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d2e145c-5068-4dff-a35e-14fe385cdcf2" containerName="barbican-keystone-listener" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124048 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d2e145c-5068-4dff-a35e-14fe385cdcf2" containerName="barbican-keystone-listener" Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.124079 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a53cec78-89c3-4495-8af6-4caf4f018cc1" containerName="cinder-scheduler" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124085 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a53cec78-89c3-4495-8af6-4caf4f018cc1" containerName="cinder-scheduler" Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.124102 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41687469-06d7-47ab-ad25-d32df165e1e2" containerName="mysql-bootstrap" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124108 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="41687469-06d7-47ab-ad25-d32df165e1e2" containerName="mysql-bootstrap" Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.124125 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6133e02f-8ece-4b6b-ac4a-c3871e017c1e" containerName="openstack-network-exporter" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124131 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6133e02f-8ece-4b6b-ac4a-c3871e017c1e" containerName="openstack-network-exporter" Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.124151 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cb677df-7237-4b82-8806-d7abedfad40c" containerName="init" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124158 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cb677df-7237-4b82-8806-d7abedfad40c" containerName="init" Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.124175 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1f7e362-6e6b-4636-b551-4533ad037811" containerName="ovn-controller" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124181 4857 
state_mem.go:107] "Deleted CPUSet assignment" podUID="b1f7e362-6e6b-4636-b551-4533ad037811" containerName="ovn-controller" Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.124196 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6133e02f-8ece-4b6b-ac4a-c3871e017c1e" containerName="ovsdbserver-nb" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124202 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6133e02f-8ece-4b6b-ac4a-c3871e017c1e" containerName="ovsdbserver-nb" Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.124217 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f411fba7-d7b2-4d97-9388-c1b6f57e8328" containerName="barbican-worker-log" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124224 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f411fba7-d7b2-4d97-9388-c1b6f57e8328" containerName="barbican-worker-log" Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.124235 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bb8cc13-eda7-4c41-9878-77ddabd55f4b" containerName="openstack-network-exporter" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124243 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bb8cc13-eda7-4c41-9878-77ddabd55f4b" containerName="openstack-network-exporter" Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.124255 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a53cec78-89c3-4495-8af6-4caf4f018cc1" containerName="probe" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124260 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a53cec78-89c3-4495-8af6-4caf4f018cc1" containerName="probe" Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.124281 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2fec95b-4e40-4761-9d14-6abfeb78d9c0" containerName="ovsdbserver-sb" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124288 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2fec95b-4e40-4761-9d14-6abfeb78d9c0" containerName="ovsdbserver-sb" Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.124309 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6a593cc-74b3-4a02-ba7a-f4c5d7400476" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124316 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6a593cc-74b3-4a02-ba7a-f4c5d7400476" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.124323 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cb677df-7237-4b82-8806-d7abedfad40c" containerName="dnsmasq-dns" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124329 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cb677df-7237-4b82-8806-d7abedfad40c" containerName="dnsmasq-dns" Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.124346 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41687469-06d7-47ab-ad25-d32df165e1e2" containerName="galera" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124351 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="41687469-06d7-47ab-ad25-d32df165e1e2" containerName="galera" Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.124368 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2fec95b-4e40-4761-9d14-6abfeb78d9c0" containerName="openstack-network-exporter" Nov 28 13:44:12 crc 
kubenswrapper[4857]: I1128 13:44:12.124374 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2fec95b-4e40-4761-9d14-6abfeb78d9c0" containerName="openstack-network-exporter" Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.124392 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f411fba7-d7b2-4d97-9388-c1b6f57e8328" containerName="barbican-worker" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124401 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f411fba7-d7b2-4d97-9388-c1b6f57e8328" containerName="barbican-worker" Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.124426 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d2e145c-5068-4dff-a35e-14fe385cdcf2" containerName="barbican-keystone-listener-log" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124459 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d2e145c-5068-4dff-a35e-14fe385cdcf2" containerName="barbican-keystone-listener-log" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124774 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a53cec78-89c3-4495-8af6-4caf4f018cc1" containerName="cinder-scheduler" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124795 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bb8cc13-eda7-4c41-9878-77ddabd55f4b" containerName="openstack-network-exporter" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124802 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="41687469-06d7-47ab-ad25-d32df165e1e2" containerName="galera" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124825 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="6133e02f-8ece-4b6b-ac4a-c3871e017c1e" containerName="openstack-network-exporter" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124845 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6a593cc-74b3-4a02-ba7a-f4c5d7400476" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124869 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1f7e362-6e6b-4636-b551-4533ad037811" containerName="ovn-controller" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124889 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a53cec78-89c3-4495-8af6-4caf4f018cc1" containerName="probe" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124901 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f411fba7-d7b2-4d97-9388-c1b6f57e8328" containerName="barbican-worker" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124917 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0cb677df-7237-4b82-8806-d7abedfad40c" containerName="dnsmasq-dns" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124930 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="6133e02f-8ece-4b6b-ac4a-c3871e017c1e" containerName="ovsdbserver-nb" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124950 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f411fba7-d7b2-4d97-9388-c1b6f57e8328" containerName="barbican-worker-log" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124966 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d2e145c-5068-4dff-a35e-14fe385cdcf2" containerName="barbican-keystone-listener-log" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124979 4857 
memory_manager.go:354] "RemoveStaleState removing state" podUID="e2fec95b-4e40-4761-9d14-6abfeb78d9c0" containerName="ovsdbserver-sb" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.124993 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2fec95b-4e40-4761-9d14-6abfeb78d9c0" containerName="openstack-network-exporter" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.125005 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d2e145c-5068-4dff-a35e-14fe385cdcf2" containerName="barbican-keystone-listener" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.126696 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone0c6f-account-delete-bbxfz" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.148898 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="4477c075-9151-49cc-bb52-82dc34ea46ec" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.163:8776/healthcheck\": read tcp 10.217.0.2:60324->10.217.0.163:8776: read: connection reset by peer" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.162332 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-scripts" (OuterVolumeSpecName: "scripts") pod "a53cec78-89c3-4495-8af6-4caf4f018cc1" (UID: "a53cec78-89c3-4495-8af6-4caf4f018cc1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.163119 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41687469-06d7-47ab-ad25-d32df165e1e2-kube-api-access-99tbc" (OuterVolumeSpecName: "kube-api-access-99tbc") pod "41687469-06d7-47ab-ad25-d32df165e1e2" (UID: "41687469-06d7-47ab-ad25-d32df165e1e2"). InnerVolumeSpecName "kube-api-access-99tbc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.164306 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a53cec78-89c3-4495-8af6-4caf4f018cc1" (UID: "a53cec78-89c3-4495-8af6-4caf4f018cc1"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.186500 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftjvm\" (UniqueName: \"kubernetes.io/projected/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-kube-api-access-ftjvm\") pod \"novacell10c56-account-delete-6l7tk\" (UID: \"a81fb5f5-33d2-4da6-86a6-d2f248a3364f\") " pod="openstack/novacell10c56-account-delete-6l7tk" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.194663 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-9689bdb94-frvhg"] Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.200715 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.200742 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-99tbc\" (UniqueName: \"kubernetes.io/projected/41687469-06d7-47ab-ad25-d32df165e1e2-kube-api-access-99tbc\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.200766 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.201040 4857 projected.go:194] Error preparing data for projected volume kube-api-access-ftjvm for pod openstack/novacell10c56-account-delete-6l7tk: failed to fetch token: pod "novacell10c56-account-delete-6l7tk" not found Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.201089 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-kube-api-access-ftjvm podName:a81fb5f5-33d2-4da6-86a6-d2f248a3364f nodeName:}" failed. No retries permitted until 2025-11-28 13:44:16.201073548 +0000 UTC m=+1548.228448715 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-ftjvm" (UniqueName: "kubernetes.io/projected/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-kube-api-access-ftjvm") pod "novacell10c56-account-delete-6l7tk" (UID: "a81fb5f5-33d2-4da6-86a6-d2f248a3364f") : failed to fetch token: pod "novacell10c56-account-delete-6l7tk" not found Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.205259 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-9689bdb94-frvhg"] Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.231006 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "mysql-db") pod "41687469-06d7-47ab-ad25-d32df165e1e2" (UID: "41687469-06d7-47ab-ad25-d32df165e1e2"). InnerVolumeSpecName "local-storage06-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.256980 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone0c6f-account-delete-bbxfz"] Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.290032 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.292763 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-x2hhj"] Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.302206 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dab9a798-b94d-47b1-bd82-48ff5f477dc5-operator-scripts\") pod \"keystone0c6f-account-delete-bbxfz\" (UID: \"dab9a798-b94d-47b1-bd82-48ff5f477dc5\") " pod="openstack/keystone0c6f-account-delete-bbxfz" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.302395 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkt2b\" (UniqueName: \"kubernetes.io/projected/dab9a798-b94d-47b1-bd82-48ff5f477dc5-kube-api-access-kkt2b\") pod \"keystone0c6f-account-delete-bbxfz\" (UID: \"dab9a798-b94d-47b1-bd82-48ff5f477dc5\") " pod="openstack/keystone0c6f-account-delete-bbxfz" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.302472 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.305094 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-x2hhj"] Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.345087 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07621208-d831-4470-908c-76084c830753" path="/var/lib/kubelet/pods/07621208-d831-4470-908c-76084c830753/volumes" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.346609 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0cb677df-7237-4b82-8806-d7abedfad40c" path="/var/lib/kubelet/pods/0cb677df-7237-4b82-8806-d7abedfad40c/volumes" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.347349 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d2e145c-5068-4dff-a35e-14fe385cdcf2" path="/var/lib/kubelet/pods/0d2e145c-5068-4dff-a35e-14fe385cdcf2/volumes" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.348538 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e640153-f178-4532-af27-302cf3098ef4" path="/var/lib/kubelet/pods/2e640153-f178-4532-af27-302cf3098ef4/volumes" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.349442 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6133e02f-8ece-4b6b-ac4a-c3871e017c1e" path="/var/lib/kubelet/pods/6133e02f-8ece-4b6b-ac4a-c3871e017c1e/volumes" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.350219 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8bb8cc13-eda7-4c41-9878-77ddabd55f4b" path="/var/lib/kubelet/pods/8bb8cc13-eda7-4c41-9878-77ddabd55f4b/volumes" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.351235 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96846d9c-1949-4655-be98-006b4e5dd154" 
path="/var/lib/kubelet/pods/96846d9c-1949-4655-be98-006b4e5dd154/volumes" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.351809 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a914f102-2a88-4272-933b-2f108273c581" path="/var/lib/kubelet/pods/a914f102-2a88-4272-933b-2f108273c581/volumes" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.352410 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2fec95b-4e40-4761-9d14-6abfeb78d9c0" path="/var/lib/kubelet/pods/e2fec95b-4e40-4761-9d14-6abfeb78d9c0/volumes" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.403879 4857 scope.go:117] "RemoveContainer" containerID="a840981835cecbc52064fb805056a67f1d699c2bf561f689da71d182309a6ea3" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.404915 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkt2b\" (UniqueName: \"kubernetes.io/projected/dab9a798-b94d-47b1-bd82-48ff5f477dc5-kube-api-access-kkt2b\") pod \"keystone0c6f-account-delete-bbxfz\" (UID: \"dab9a798-b94d-47b1-bd82-48ff5f477dc5\") " pod="openstack/keystone0c6f-account-delete-bbxfz" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.405743 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dab9a798-b94d-47b1-bd82-48ff5f477dc5-operator-scripts\") pod \"keystone0c6f-account-delete-bbxfz\" (UID: \"dab9a798-b94d-47b1-bd82-48ff5f477dc5\") " pod="openstack/keystone0c6f-account-delete-bbxfz" Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.405939 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.406035 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/dab9a798-b94d-47b1-bd82-48ff5f477dc5-operator-scripts podName:dab9a798-b94d-47b1-bd82-48ff5f477dc5 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:12.906020744 +0000 UTC m=+1544.933395911 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/dab9a798-b94d-47b1-bd82-48ff5f477dc5-operator-scripts") pod "keystone0c6f-account-delete-bbxfz" (UID: "dab9a798-b94d-47b1-bd82-48ff5f477dc5") : configmap "openstack-scripts" not found Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.408174 4857 projected.go:194] Error preparing data for projected volume kube-api-access-kkt2b for pod openstack/keystone0c6f-account-delete-bbxfz: failed to fetch token: serviceaccounts "galera-openstack" not found Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.408239 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/dab9a798-b94d-47b1-bd82-48ff5f477dc5-kube-api-access-kkt2b podName:dab9a798-b94d-47b1-bd82-48ff5f477dc5 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:12.908220417 +0000 UTC m=+1544.935595734 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-kkt2b" (UniqueName: "kubernetes.io/projected/dab9a798-b94d-47b1-bd82-48ff5f477dc5-kube-api-access-kkt2b") pod "keystone0c6f-account-delete-bbxfz" (UID: "dab9a798-b94d-47b1-bd82-48ff5f477dc5") : failed to fetch token: serviceaccounts "galera-openstack" not found Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.452281 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="71cc1f00-1a63-428e-8f12-2136ab077860" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.102:5671: connect: connection refused" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.758263 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b6a593cc-74b3-4a02-ba7a-f4c5d7400476" (UID: "b6a593cc-74b3-4a02-ba7a-f4c5d7400476"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.793691 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="ea2604b9-e3ca-4145-b8c3-42a9b8e3b286" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.201:8775/\": read tcp 10.217.0.2:46146->10.217.0.201:8775: read: connection reset by peer" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.797022 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="ea2604b9-e3ca-4145-b8c3-42a9b8e3b286" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.201:8775/\": read tcp 10.217.0.2:46148->10.217.0.201:8775: read: connection reset by peer" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.813733 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.826700 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.826895 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.837712 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f411fba7-d7b2-4d97-9388-c1b6f57e8328-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f411fba7-d7b2-4d97-9388-c1b6f57e8328" (UID: "f411fba7-d7b2-4d97-9388-c1b6f57e8328"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.937448 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkt2b\" (UniqueName: \"kubernetes.io/projected/dab9a798-b94d-47b1-bd82-48ff5f477dc5-kube-api-access-kkt2b\") pod \"keystone0c6f-account-delete-bbxfz\" (UID: \"dab9a798-b94d-47b1-bd82-48ff5f477dc5\") " pod="openstack/keystone0c6f-account-delete-bbxfz" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.937693 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dab9a798-b94d-47b1-bd82-48ff5f477dc5-operator-scripts\") pod \"keystone0c6f-account-delete-bbxfz\" (UID: \"dab9a798-b94d-47b1-bd82-48ff5f477dc5\") " pod="openstack/keystone0c6f-account-delete-bbxfz" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.937827 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f411fba7-d7b2-4d97-9388-c1b6f57e8328-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.937887 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.937957 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/dab9a798-b94d-47b1-bd82-48ff5f477dc5-operator-scripts podName:dab9a798-b94d-47b1-bd82-48ff5f477dc5 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:13.937916999 +0000 UTC m=+1545.965292166 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/dab9a798-b94d-47b1-bd82-48ff5f477dc5-operator-scripts") pod "keystone0c6f-account-delete-bbxfz" (UID: "dab9a798-b94d-47b1-bd82-48ff5f477dc5") : configmap "openstack-scripts" not found Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.948761 4857 projected.go:194] Error preparing data for projected volume kube-api-access-kkt2b for pod openstack/keystone0c6f-account-delete-bbxfz: failed to fetch token: serviceaccounts "galera-openstack" not found Nov 28 13:44:12 crc kubenswrapper[4857]: E1128 13:44:12.948856 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/dab9a798-b94d-47b1-bd82-48ff5f477dc5-kube-api-access-kkt2b podName:dab9a798-b94d-47b1-bd82-48ff5f477dc5 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:13.948807471 +0000 UTC m=+1545.976182638 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-kkt2b" (UniqueName: "kubernetes.io/projected/dab9a798-b94d-47b1-bd82-48ff5f477dc5-kube-api-access-kkt2b") pod "keystone0c6f-account-delete-bbxfz" (UID: "dab9a798-b94d-47b1-bd82-48ff5f477dc5") : failed to fetch token: serviceaccounts "galera-openstack" not found Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.985223 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-vencrypt-tls-certs" (OuterVolumeSpecName: "vencrypt-tls-certs") pod "b6a593cc-74b3-4a02-ba7a-f4c5d7400476" (UID: "b6a593cc-74b3-4a02-ba7a-f4c5d7400476"). InnerVolumeSpecName "vencrypt-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.992104 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41687469-06d7-47ab-ad25-d32df165e1e2-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "41687469-06d7-47ab-ad25-d32df165e1e2" (UID: "41687469-06d7-47ab-ad25-d32df165e1e2"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.003802 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41687469-06d7-47ab-ad25-d32df165e1e2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "41687469-06d7-47ab-ad25-d32df165e1e2" (UID: "41687469-06d7-47ab-ad25-d32df165e1e2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.007293 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a53cec78-89c3-4495-8af6-4caf4f018cc1" (UID: "a53cec78-89c3-4495-8af6-4caf4f018cc1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.012569 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-config-data" (OuterVolumeSpecName: "config-data") pod "b6a593cc-74b3-4a02-ba7a-f4c5d7400476" (UID: "b6a593cc-74b3-4a02-ba7a-f4c5d7400476"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.039251 4857 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/41687469-06d7-47ab-ad25-d32df165e1e2-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.042314 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41687469-06d7-47ab-ad25-d32df165e1e2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.042505 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.042560 4857 reconciler_common.go:293] "Volume detached for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-vencrypt-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.042617 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.050409 4857 generic.go:334] "Generic (PLEG): container finished" podID="30a2b522-ef43-4b0a-8215-2bb928744e00" containerID="32156962d3c5fd3e7bbc12ce4bc19050625834d7bd9f60bcb681cfb5610ca641" exitCode=0 Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.061818 4857 generic.go:334] "Generic (PLEG): container finished" 
podID="ea2604b9-e3ca-4145-b8c3-42a9b8e3b286" containerID="ec5937438716528a8aa131c5d6bf8c9a57f6a24f30318571c52d136b077dfcf7" exitCode=0 Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.070024 4857 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/novacell032fc-account-delete-xk7xx" secret="" err="secret \"galera-openstack-dockercfg-nwvzk\" not found" Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.087049 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of af2a21437b5950c07391db6d069bd153d9b422fd5daa52cd346a87417d643f35 is running failed: container process not found" containerID="af2a21437b5950c07391db6d069bd153d9b422fd5daa52cd346a87417d643f35" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.092491 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-config-data" (OuterVolumeSpecName: "config-data") pod "a53cec78-89c3-4495-8af6-4caf4f018cc1" (UID: "a53cec78-89c3-4495-8af6-4caf4f018cc1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.104839 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of af2a21437b5950c07391db6d069bd153d9b422fd5daa52cd346a87417d643f35 is running failed: container process not found" containerID="af2a21437b5950c07391db6d069bd153d9b422fd5daa52cd346a87417d643f35" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.107248 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/novacell032fc-account-delete-xk7xx" podStartSLOduration=6.107234466 podStartE2EDuration="6.107234466s" podCreationTimestamp="2025-11-28 13:44:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:44:13.087557593 +0000 UTC m=+1545.114932760" watchObservedRunningTime="2025-11-28 13:44:13.107234466 +0000 UTC m=+1545.134609633" Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.108602 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of af2a21437b5950c07391db6d069bd153d9b422fd5daa52cd346a87417d643f35 is running failed: container process not found" containerID="af2a21437b5950c07391db6d069bd153d9b422fd5daa52cd346a87417d643f35" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.108732 4857 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of af2a21437b5950c07391db6d069bd153d9b422fd5daa52cd346a87417d643f35 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="f7bf9e28-fd40-4b0d-aac9-995eff12a115" containerName="nova-cell1-conductor-conductor" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.115165 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="7b0c1834-7ece-4d9c-9cf1-28a53aea280e" containerName="galera" 
containerID="cri-o://1d33095c862babd8cc0dde10c6cb07ce5a2d5f837c975bd8aca7a7cf1568e774" gracePeriod=30 Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.117938 4857 generic.go:334] "Generic (PLEG): container finished" podID="3a952329-a8d9-432d-ac5b-d88b7e2ede6b" containerID="d8e862b58223c1ae15f7828a07974724e3a49c1477b31569a8dbea821c8bc09e" exitCode=0 Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.130099 4857 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/novaapi7cc9-account-delete-qjqg5" secret="" err="secret \"galera-openstack-dockercfg-nwvzk\" not found" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.134874 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f411fba7-d7b2-4d97-9388-c1b6f57e8328-config-data" (OuterVolumeSpecName: "config-data") pod "f411fba7-d7b2-4d97-9388-c1b6f57e8328" (UID: "f411fba7-d7b2-4d97-9388-c1b6f57e8328"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.147477 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.147620 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.148060 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ad7bc32b-e1f1-4ce5-a094-56f37d676131-operator-scripts podName:ad7bc32b-e1f1-4ce5-a094-56f37d676131 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:13.647687824 +0000 UTC m=+1545.675062991 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/ad7bc32b-e1f1-4ce5-a094-56f37d676131-operator-scripts") pod "novacell032fc-account-delete-xk7xx" (UID: "ad7bc32b-e1f1-4ce5-a094-56f37d676131") : configmap "openstack-scripts" not found Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.148135 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6f75b361-6a38-42a4-971c-1b3a68a3f10f-operator-scripts podName:6f75b361-6a38-42a4-971c-1b3a68a3f10f nodeName:}" failed. No retries permitted until 2025-11-28 13:44:13.648125736 +0000 UTC m=+1545.675500903 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/6f75b361-6a38-42a4-971c-1b3a68a3f10f-operator-scripts") pod "novaapi7cc9-account-delete-qjqg5" (UID: "6f75b361-6a38-42a4-971c-1b3a68a3f10f") : configmap "openstack-scripts" not found Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.152461 4857 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="openstack/barbican7dd8-account-delete-jg2j5" secret="" err="secret \"galera-openstack-dockercfg-nwvzk\" not found" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.156820 4857 generic.go:334] "Generic (PLEG): container finished" podID="0d0c82d5-b320-444c-a4d9-838ca3097157" containerID="6307f97c800ac6b026e60dbaa702231a4783caba353d39fc8956c6ce72d5e01e" exitCode=0 Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.159700 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/novaapi7cc9-account-delete-qjqg5" podStartSLOduration=6.159679397 podStartE2EDuration="6.159679397s" podCreationTimestamp="2025-11-28 13:44:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:44:13.150875145 +0000 UTC m=+1545.178250332" watchObservedRunningTime="2025-11-28 13:44:13.159679397 +0000 UTC m=+1545.187054564" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.162295 4857 generic.go:334] "Generic (PLEG): container finished" podID="f7bf9e28-fd40-4b0d-aac9-995eff12a115" containerID="af2a21437b5950c07391db6d069bd153d9b422fd5daa52cd346a87417d643f35" exitCode=0 Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.165819 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f411fba7-d7b2-4d97-9388-c1b6f57e8328-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.165868 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a53cec78-89c3-4495-8af6-4caf4f018cc1-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.179032 4857 generic.go:334] "Generic (PLEG): container finished" podID="cc11fd89-0365-46e5-b8b1-48f933611ab9" containerID="758611f27d908e2a9d4f2cb15d9c474f4f04bb2c788bba7c25fe962588bee8ea" exitCode=0 Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.179896 4857 generic.go:334] "Generic (PLEG): container finished" podID="cc11fd89-0365-46e5-b8b1-48f933611ab9" containerID="53c5151a4983e3c03ad2115ba0190cda5364aa8956976486e4a3dda5c19894bc" exitCode=2 Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.179981 4857 generic.go:334] "Generic (PLEG): container finished" podID="cc11fd89-0365-46e5-b8b1-48f933611ab9" containerID="3e988a9d71b894b528cad9cf749fa687e397d909171f243b57b66253d5c4fcf4" exitCode=0 Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.179838 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-nova-novncproxy-tls-certs" (OuterVolumeSpecName: "nova-novncproxy-tls-certs") pod "b6a593cc-74b3-4a02-ba7a-f4c5d7400476" (UID: "b6a593cc-74b3-4a02-ba7a-f4c5d7400476"). InnerVolumeSpecName "nova-novncproxy-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.189785 4857 generic.go:334] "Generic (PLEG): container finished" podID="946c0669-4c99-46b7-a9ff-437042383642" containerID="4bdd0ee5b2dc8d0eba75e5970152f8cfe9df74f09930b295ed3cf6ddb62ac999" exitCode=0 Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.193494 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican7dd8-account-delete-jg2j5" podStartSLOduration=7.193474494 podStartE2EDuration="7.193474494s" podCreationTimestamp="2025-11-28 13:44:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:44:13.167692476 +0000 UTC m=+1545.195067643" watchObservedRunningTime="2025-11-28 13:44:13.193474494 +0000 UTC m=+1545.220849661" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.197360 4857 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/cinderd8b3-account-delete-lxwj8" secret="" err="secret \"galera-openstack-dockercfg-nwvzk\" not found" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.217740 4857 generic.go:334] "Generic (PLEG): container finished" podID="4477c075-9151-49cc-bb52-82dc34ea46ec" containerID="4dd7dcf6024fd47fb7c4424b294f5cadc4f936ab98e05bb09fe4f5e3d7651e94" exitCode=0 Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.219903 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinderd8b3-account-delete-lxwj8" podStartSLOduration=6.219892901 podStartE2EDuration="6.219892901s" podCreationTimestamp="2025-11-28 13:44:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:44:13.213406065 +0000 UTC m=+1545.240781232" watchObservedRunningTime="2025-11-28 13:44:13.219892901 +0000 UTC m=+1545.247268068" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.225148 4857 generic.go:334] "Generic (PLEG): container finished" podID="1fa6d725-8054-46f1-8c0c-c693d5306563" containerID="5293dd02b3d8cbb50029798677d596c61dae0e02fb1b0ef17359254ce5d584b6" exitCode=2 Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.228232 4857 generic.go:334] "Generic (PLEG): container finished" podID="64da16e3-099d-4def-9656-91f40d64672f" containerID="1146e3ec8a4d803ee31e0a88958bb4723468c3f9bc7e9a7d393734acda6d6b4a" exitCode=0 Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.230388 4857 generic.go:334] "Generic (PLEG): container finished" podID="7bee7127-9367-4882-8ab1-0493128d2641" containerID="15caeb74f903a78a3ff675fa24fc2fa63c9da6eab92af97c459eb92425c7c093" exitCode=0 Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.230619 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.268500 4857 reconciler_common.go:293] "Volume detached for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6a593cc-74b3-4a02-ba7a-f4c5d7400476-nova-novncproxy-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.268585 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.268631 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d8c0e041-9c74-4a06-a966-833e919e745a-operator-scripts podName:d8c0e041-9c74-4a06-a966-833e919e745a nodeName:}" failed. No retries permitted until 2025-11-28 13:44:13.768618225 +0000 UTC m=+1545.795993392 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/d8c0e041-9c74-4a06-a966-833e919e745a-operator-scripts") pod "barbican7dd8-account-delete-jg2j5" (UID: "d8c0e041-9c74-4a06-a966-833e919e745a") : configmap "openstack-scripts" not found Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.268726 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.268811 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-operator-scripts podName:24a3dca4-a3d0-479d-9be8-fb8c16f97a77 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:13.768741759 +0000 UTC m=+1545.796116926 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-operator-scripts") pod "cinderd8b3-account-delete-lxwj8" (UID: "24a3dca4-a3d0-479d-9be8-fb8c16f97a77") : configmap "openstack-scripts" not found Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.679103 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.679210 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ad7bc32b-e1f1-4ce5-a094-56f37d676131-operator-scripts podName:ad7bc32b-e1f1-4ce5-a094-56f37d676131 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:14.679177066 +0000 UTC m=+1546.706552233 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/ad7bc32b-e1f1-4ce5-a094-56f37d676131-operator-scripts") pod "novacell032fc-account-delete-xk7xx" (UID: "ad7bc32b-e1f1-4ce5-a094-56f37d676131") : configmap "openstack-scripts" not found Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.679137 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.679287 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6f75b361-6a38-42a4-971c-1b3a68a3f10f-operator-scripts podName:6f75b361-6a38-42a4-971c-1b3a68a3f10f nodeName:}" failed. No retries permitted until 2025-11-28 13:44:14.679266279 +0000 UTC m=+1546.706641446 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/6f75b361-6a38-42a4-971c-1b3a68a3f10f-operator-scripts") pod "novaapi7cc9-account-delete-qjqg5" (UID: "6f75b361-6a38-42a4-971c-1b3a68a3f10f") : configmap "openstack-scripts" not found Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.779250 4857 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="1.471s" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780377 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5867-account-create-update-j7btn"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780450 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"30a2b522-ef43-4b0a-8215-2bb928744e00","Type":"ContainerDied","Data":"32156962d3c5fd3e7bbc12ce4bc19050625834d7bd9f60bcb681cfb5610ca641"} Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780474 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5867-account-create-update-j7btn"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780491 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-gw7vr"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780503 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286","Type":"ContainerDied","Data":"ec5937438716528a8aa131c5d6bf8c9a57f6a24f30318571c52d136b077dfcf7"} Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780516 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell032fc-account-delete-xk7xx" event={"ID":"ad7bc32b-e1f1-4ce5-a094-56f37d676131","Type":"ContainerStarted","Data":"176a8c3598639b71724934fa1590ebaca77aa44b9b8de6fbcb3127e2a34f1547"} Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780527 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-gw7vr"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780539 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-856655ccc5-9fgqc" event={"ID":"7358aa80-dbe4-4a31-ad84-9dc125491046","Type":"ContainerDied","Data":"f1269091f3faf9b179a8e0748578833ca79484782be78f7a3c355b5f39a2f17a"} Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780551 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f1269091f3faf9b179a8e0748578833ca79484782be78f7a3c355b5f39a2f17a" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780559 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3a952329-a8d9-432d-ac5b-d88b7e2ede6b","Type":"ContainerDied","Data":"d8e862b58223c1ae15f7828a07974724e3a49c1477b31569a8dbea821c8bc09e"} Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780570 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placementcdcd-account-delete-h5qc4" event={"ID":"e1ac27d1-5ad2-40ed-af2b-18668e48ead3","Type":"ContainerDied","Data":"289b2e704693fc06f78e08b3a90fd18a768688096c5ca173277c09d24138648e"} Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780582 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="289b2e704693fc06f78e08b3a90fd18a768688096c5ca173277c09d24138648e" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780589 4857 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/novaapi7cc9-account-delete-qjqg5" event={"ID":"6f75b361-6a38-42a4-971c-1b3a68a3f10f","Type":"ContainerStarted","Data":"e0661472bb397b0a3a1dd55baa5c4817a98ad0975c18b4cb900b5650b0bc0b7a"} Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780598 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican7dd8-account-delete-jg2j5" event={"ID":"d8c0e041-9c74-4a06-a966-833e919e745a","Type":"ContainerStarted","Data":"7b2a4e3bea8fb0cf276592ab346a1a0e736d0c8fe0be99aa787de15196b3f05e"} Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780608 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-575548d9c6-4zx6z" event={"ID":"0d0c82d5-b320-444c-a4d9-838ca3097157","Type":"ContainerDied","Data":"6307f97c800ac6b026e60dbaa702231a4783caba353d39fc8956c6ce72d5e01e"} Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780620 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"f7bf9e28-fd40-4b0d-aac9-995eff12a115","Type":"ContainerDied","Data":"af2a21437b5950c07391db6d069bd153d9b422fd5daa52cd346a87417d643f35"} Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780634 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron5867-account-delete-58twd"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780645 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron5867-account-delete-58twd" event={"ID":"89adcb9a-b993-4e60-ae3b-413bed35ae0d","Type":"ContainerDied","Data":"9274a232b3ca2ca988b7f756090b8c2800a83d2a5c93887b4ad8ab45f68ebed2"} Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780656 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9274a232b3ca2ca988b7f756090b8c2800a83d2a5c93887b4ad8ab45f68ebed2" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780666 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone0c6f-account-delete-bbxfz"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780677 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cc11fd89-0365-46e5-b8b1-48f933611ab9","Type":"ContainerDied","Data":"758611f27d908e2a9d4f2cb15d9c474f4f04bb2c788bba7c25fe962588bee8ea"} Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780687 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-0c6f-account-create-update-r8x7m"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780697 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-0c6f-account-create-update-r8x7m"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780706 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cc11fd89-0365-46e5-b8b1-48f933611ab9","Type":"ContainerDied","Data":"53c5151a4983e3c03ad2115ba0190cda5364aa8956976486e4a3dda5c19894bc"} Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780715 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cc11fd89-0365-46e5-b8b1-48f933611ab9","Type":"ContainerDied","Data":"3e988a9d71b894b528cad9cf749fa687e397d909171f243b57b66253d5c4fcf4"} Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780726 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-749fd8cf96-rbd6r" 
event={"ID":"946c0669-4c99-46b7-a9ff-437042383642","Type":"ContainerDied","Data":"4bdd0ee5b2dc8d0eba75e5970152f8cfe9df74f09930b295ed3cf6ddb62ac999"} Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780737 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-749fd8cf96-rbd6r" event={"ID":"946c0669-4c99-46b7-a9ff-437042383642","Type":"ContainerDied","Data":"28d378399a22b6465e842cbbbb1052a8ad7a94336fe8212837dfd391a9ef8de6"} Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780744 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="28d378399a22b6465e842cbbbb1052a8ad7a94336fe8212837dfd391a9ef8de6" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780774 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinderd8b3-account-delete-lxwj8" event={"ID":"24a3dca4-a3d0-479d-9be8-fb8c16f97a77","Type":"ContainerStarted","Data":"c87cac856e484a65204ac4a22fcde410a6698e64a13f6990ed0561ebfe6b4815"} Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780785 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-7sswn"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780797 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-7sswn"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780960 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-7dd8-account-create-update-nlgn4"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.780984 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4477c075-9151-49cc-bb52-82dc34ea46ec","Type":"ContainerDied","Data":"4dd7dcf6024fd47fb7c4424b294f5cadc4f936ab98e05bb09fe4f5e3d7651e94"} Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.781002 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican7dd8-account-delete-jg2j5"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.781017 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-t99ql"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.781030 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-7dd8-account-create-update-nlgn4"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.781045 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1fa6d725-8054-46f1-8c0c-c693d5306563","Type":"ContainerDied","Data":"5293dd02b3d8cbb50029798677d596c61dae0e02fb1b0ef17359254ce5d584b6"} Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.781061 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-t99ql"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.781075 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"64da16e3-099d-4def-9656-91f40d64672f","Type":"ContainerDied","Data":"1146e3ec8a4d803ee31e0a88958bb4723468c3f9bc7e9a7d393734acda6d6b4a"} Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.781094 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell10c56-account-delete-6l7tk"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.781104 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"7bee7127-9367-4882-8ab1-0493128d2641","Type":"ContainerDied","Data":"15caeb74f903a78a3ff675fa24fc2fa63c9da6eab92af97c459eb92425c7c093"} Nov 28 13:44:13 crc 
kubenswrapper[4857]: I1128 13:44:13.781120 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novacell10c56-account-delete-6l7tk"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.781135 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-wd9mq"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.781145 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-wd9mq"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.781155 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinderd8b3-account-delete-lxwj8"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.781167 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-d8b3-account-create-update-r4qcn"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.781180 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-d8b3-account-create-update-r4qcn"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.781190 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-h28xd"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.781202 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-h28xd"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.781212 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-7cc9-account-create-update-fvpgx"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.781221 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novaapi7cc9-account-delete-qjqg5"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.781234 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-7cc9-account-create-update-fvpgx"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.781245 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-57bwd"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.781257 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-57bwd"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.781267 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-32fc-account-create-update-vvdvc"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.781278 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell032fc-account-delete-xk7xx"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.781289 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-32fc-account-create-update-vvdvc"] Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.781954 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.782014 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d8c0e041-9c74-4a06-a966-833e919e745a-operator-scripts podName:d8c0e041-9c74-4a06-a966-833e919e745a nodeName:}" failed. No retries permitted until 2025-11-28 13:44:14.781996429 +0000 UTC m=+1546.809371596 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/d8c0e041-9c74-4a06-a966-833e919e745a-operator-scripts") pod "barbican7dd8-account-delete-jg2j5" (UID: "d8c0e041-9c74-4a06-a966-833e919e745a") : configmap "openstack-scripts" not found Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.782344 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.782375 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-operator-scripts podName:24a3dca4-a3d0-479d-9be8-fb8c16f97a77 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:14.78236485 +0000 UTC m=+1546.809740027 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-operator-scripts") pod "cinderd8b3-account-delete-lxwj8" (UID: "24a3dca4-a3d0-479d-9be8-fb8c16f97a77") : configmap "openstack-scripts" not found Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.821558 4857 scope.go:117] "RemoveContainer" containerID="9edd8ca732119343257da06c9d3c8090ac7032d415e0af5cc821df9c9c20bf76" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.883996 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.884018 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ftjvm\" (UniqueName: \"kubernetes.io/projected/a81fb5f5-33d2-4da6-86a6-d2f248a3364f-kube-api-access-ftjvm\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.918073 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.918124 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1d33095c862babd8cc0dde10c6cb07ce5a2d5f837c975bd8aca7a7cf1568e774" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.918346 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-856655ccc5-9fgqc" Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.919640 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1d33095c862babd8cc0dde10c6cb07ce5a2d5f837c975bd8aca7a7cf1568e774" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.920417 4857 scope.go:117] "RemoveContainer" containerID="c3560dc43f3cda5bfbcfd827097587a99cc977e3650dcfdd935c96bc56677b06" Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.921964 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1d33095c862babd8cc0dde10c6cb07ce5a2d5f837c975bd8aca7a7cf1568e774" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.922008 4857 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="7b0c1834-7ece-4d9c-9cf1-28a53aea280e" containerName="galera" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.939407 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.960473 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-56664b65dc-mkdgh"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.960523 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-56664b65dc-mkdgh"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.960539 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.972037 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 13:44:13 crc kubenswrapper[4857]: E1128 13:44:13.974630 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-kkt2b operator-scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/keystone0c6f-account-delete-bbxfz" podUID="dab9a798-b94d-47b1-bd82-48ff5f477dc5" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.986938 4857 scope.go:117] "RemoveContainer" containerID="9edd8ca732119343257da06c9d3c8090ac7032d415e0af5cc821df9c9c20bf76" Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.987261 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-internal-tls-certs\") pod \"7358aa80-dbe4-4a31-ad84-9dc125491046\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.987338 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-config-data\") pod \"7358aa80-dbe4-4a31-ad84-9dc125491046\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.987419 4857 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7358aa80-dbe4-4a31-ad84-9dc125491046-log-httpd\") pod \"7358aa80-dbe4-4a31-ad84-9dc125491046\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.987457 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7358aa80-dbe4-4a31-ad84-9dc125491046-etc-swift\") pod \"7358aa80-dbe4-4a31-ad84-9dc125491046\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.987671 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-combined-ca-bundle\") pod \"7358aa80-dbe4-4a31-ad84-9dc125491046\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.987717 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hsxvf\" (UniqueName: \"kubernetes.io/projected/7358aa80-dbe4-4a31-ad84-9dc125491046-kube-api-access-hsxvf\") pod \"7358aa80-dbe4-4a31-ad84-9dc125491046\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.987775 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-public-tls-certs\") pod \"7358aa80-dbe4-4a31-ad84-9dc125491046\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " Nov 28 13:44:13 crc kubenswrapper[4857]: I1128 13:44:13.987798 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7358aa80-dbe4-4a31-ad84-9dc125491046-run-httpd\") pod \"7358aa80-dbe4-4a31-ad84-9dc125491046\" (UID: \"7358aa80-dbe4-4a31-ad84-9dc125491046\") " Nov 28 13:44:14 crc kubenswrapper[4857]: E1128 13:44:14.000500 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9edd8ca732119343257da06c9d3c8090ac7032d415e0af5cc821df9c9c20bf76\": container with ID starting with 9edd8ca732119343257da06c9d3c8090ac7032d415e0af5cc821df9c9c20bf76 not found: ID does not exist" containerID="9edd8ca732119343257da06c9d3c8090ac7032d415e0af5cc821df9c9c20bf76" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.000586 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9edd8ca732119343257da06c9d3c8090ac7032d415e0af5cc821df9c9c20bf76"} err="failed to get container status \"9edd8ca732119343257da06c9d3c8090ac7032d415e0af5cc821df9c9c20bf76\": rpc error: code = NotFound desc = could not find container \"9edd8ca732119343257da06c9d3c8090ac7032d415e0af5cc821df9c9c20bf76\": container with ID starting with 9edd8ca732119343257da06c9d3c8090ac7032d415e0af5cc821df9c9c20bf76 not found: ID does not exist" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.000614 4857 scope.go:117] "RemoveContainer" containerID="c3560dc43f3cda5bfbcfd827097587a99cc977e3650dcfdd935c96bc56677b06" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:13.987340 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron5867-account-delete-58twd" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.003350 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7358aa80-dbe4-4a31-ad84-9dc125491046-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7358aa80-dbe4-4a31-ad84-9dc125491046" (UID: "7358aa80-dbe4-4a31-ad84-9dc125491046"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.003616 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7358aa80-dbe4-4a31-ad84-9dc125491046-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7358aa80-dbe4-4a31-ad84-9dc125491046" (UID: "7358aa80-dbe4-4a31-ad84-9dc125491046"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.006743 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7358aa80-dbe4-4a31-ad84-9dc125491046-kube-api-access-hsxvf" (OuterVolumeSpecName: "kube-api-access-hsxvf") pod "7358aa80-dbe4-4a31-ad84-9dc125491046" (UID: "7358aa80-dbe4-4a31-ad84-9dc125491046"). InnerVolumeSpecName "kube-api-access-hsxvf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: E1128 13:44:14.007203 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c3560dc43f3cda5bfbcfd827097587a99cc977e3650dcfdd935c96bc56677b06\": container with ID starting with c3560dc43f3cda5bfbcfd827097587a99cc977e3650dcfdd935c96bc56677b06 not found: ID does not exist" containerID="c3560dc43f3cda5bfbcfd827097587a99cc977e3650dcfdd935c96bc56677b06" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.007239 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3560dc43f3cda5bfbcfd827097587a99cc977e3650dcfdd935c96bc56677b06"} err="failed to get container status \"c3560dc43f3cda5bfbcfd827097587a99cc977e3650dcfdd935c96bc56677b06\": rpc error: code = NotFound desc = could not find container \"c3560dc43f3cda5bfbcfd827097587a99cc977e3650dcfdd935c96bc56677b06\": container with ID starting with c3560dc43f3cda5bfbcfd827097587a99cc977e3650dcfdd935c96bc56677b06 not found: ID does not exist" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.007264 4857 scope.go:117] "RemoveContainer" containerID="89d728886b576deecbaf0ff9d24f62808734b9aa348aaf482ac876e2835345d1" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.008036 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkt2b\" (UniqueName: \"kubernetes.io/projected/dab9a798-b94d-47b1-bd82-48ff5f477dc5-kube-api-access-kkt2b\") pod \"keystone0c6f-account-delete-bbxfz\" (UID: \"dab9a798-b94d-47b1-bd82-48ff5f477dc5\") " pod="openstack/keystone0c6f-account-delete-bbxfz" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.008218 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dab9a798-b94d-47b1-bd82-48ff5f477dc5-operator-scripts\") pod \"keystone0c6f-account-delete-bbxfz\" (UID: \"dab9a798-b94d-47b1-bd82-48ff5f477dc5\") " pod="openstack/keystone0c6f-account-delete-bbxfz" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.008512 4857 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7358aa80-dbe4-4a31-ad84-9dc125491046-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "7358aa80-dbe4-4a31-ad84-9dc125491046" (UID: "7358aa80-dbe4-4a31-ad84-9dc125491046"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.008531 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hsxvf\" (UniqueName: \"kubernetes.io/projected/7358aa80-dbe4-4a31-ad84-9dc125491046-kube-api-access-hsxvf\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.008543 4857 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7358aa80-dbe4-4a31-ad84-9dc125491046-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.008552 4857 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7358aa80-dbe4-4a31-ad84-9dc125491046-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.008553 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 13:44:14 crc kubenswrapper[4857]: E1128 13:44:14.008614 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:14 crc kubenswrapper[4857]: E1128 13:44:14.008657 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/dab9a798-b94d-47b1-bd82-48ff5f477dc5-operator-scripts podName:dab9a798-b94d-47b1-bd82-48ff5f477dc5 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:16.008644127 +0000 UTC m=+1548.036019294 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/dab9a798-b94d-47b1-bd82-48ff5f477dc5-operator-scripts") pod "keystone0c6f-account-delete-bbxfz" (UID: "dab9a798-b94d-47b1-bd82-48ff5f477dc5") : configmap "openstack-scripts" not found Nov 28 13:44:14 crc kubenswrapper[4857]: E1128 13:44:14.012173 4857 projected.go:194] Error preparing data for projected volume kube-api-access-kkt2b for pod openstack/keystone0c6f-account-delete-bbxfz: failed to fetch token: serviceaccounts "galera-openstack" not found Nov 28 13:44:14 crc kubenswrapper[4857]: E1128 13:44:14.012220 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/dab9a798-b94d-47b1-bd82-48ff5f477dc5-kube-api-access-kkt2b podName:dab9a798-b94d-47b1-bd82-48ff5f477dc5 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:16.012206379 +0000 UTC m=+1548.039581546 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-kkt2b" (UniqueName: "kubernetes.io/projected/dab9a798-b94d-47b1-bd82-48ff5f477dc5-kube-api-access-kkt2b") pod "keystone0c6f-account-delete-bbxfz" (UID: "dab9a798-b94d-47b1-bd82-48ff5f477dc5") : failed to fetch token: serviceaccounts "galera-openstack" not found Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.021597 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.038666 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placementcdcd-account-delete-h5qc4" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.048211 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-749fd8cf96-rbd6r" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.062118 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.078367 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7358aa80-dbe4-4a31-ad84-9dc125491046" (UID: "7358aa80-dbe4-4a31-ad84-9dc125491046"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.078397 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.085824 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-575548d9c6-4zx6z" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.106863 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "7358aa80-dbe4-4a31-ad84-9dc125491046" (UID: "7358aa80-dbe4-4a31-ad84-9dc125491046"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.107741 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-config-data" (OuterVolumeSpecName: "config-data") pod "7358aa80-dbe4-4a31-ad84-9dc125491046" (UID: "7358aa80-dbe4-4a31-ad84-9dc125491046"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.109285 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqqr9\" (UniqueName: \"kubernetes.io/projected/e1ac27d1-5ad2-40ed-af2b-18668e48ead3-kube-api-access-mqqr9\") pod \"e1ac27d1-5ad2-40ed-af2b-18668e48ead3\" (UID: \"e1ac27d1-5ad2-40ed-af2b-18668e48ead3\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.109342 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1ac27d1-5ad2-40ed-af2b-18668e48ead3-operator-scripts\") pod \"e1ac27d1-5ad2-40ed-af2b-18668e48ead3\" (UID: \"e1ac27d1-5ad2-40ed-af2b-18668e48ead3\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.109363 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-config-data\") pod \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.109396 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4wx8\" (UniqueName: \"kubernetes.io/projected/89adcb9a-b993-4e60-ae3b-413bed35ae0d-kube-api-access-x4wx8\") pod \"89adcb9a-b993-4e60-ae3b-413bed35ae0d\" (UID: \"89adcb9a-b993-4e60-ae3b-413bed35ae0d\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.109428 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-combined-ca-bundle\") pod \"946c0669-4c99-46b7-a9ff-437042383642\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.109447 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-combined-ca-bundle\") pod \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.109470 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-public-tls-certs\") pod \"946c0669-4c99-46b7-a9ff-437042383642\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.109492 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-logs\") pod \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.109539 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zpf68\" (UniqueName: \"kubernetes.io/projected/1fa6d725-8054-46f1-8c0c-c693d5306563-kube-api-access-zpf68\") pod \"1fa6d725-8054-46f1-8c0c-c693d5306563\" (UID: \"1fa6d725-8054-46f1-8c0c-c693d5306563\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.109726 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-scripts\") pod \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\" 
(UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.109774 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/946c0669-4c99-46b7-a9ff-437042383642-logs\") pod \"946c0669-4c99-46b7-a9ff-437042383642\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.109790 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5lbwb\" (UniqueName: \"kubernetes.io/projected/946c0669-4c99-46b7-a9ff-437042383642-kube-api-access-5lbwb\") pod \"946c0669-4c99-46b7-a9ff-437042383642\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.109808 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p5876\" (UniqueName: \"kubernetes.io/projected/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-kube-api-access-p5876\") pod \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.109847 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-internal-tls-certs\") pod \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.109868 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-config-data\") pod \"946c0669-4c99-46b7-a9ff-437042383642\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.109892 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fa6d725-8054-46f1-8c0c-c693d5306563-combined-ca-bundle\") pod \"1fa6d725-8054-46f1-8c0c-c693d5306563\" (UID: \"1fa6d725-8054-46f1-8c0c-c693d5306563\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.109937 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-httpd-run\") pod \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.109961 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/89adcb9a-b993-4e60-ae3b-413bed35ae0d-operator-scripts\") pod \"89adcb9a-b993-4e60-ae3b-413bed35ae0d\" (UID: \"89adcb9a-b993-4e60-ae3b-413bed35ae0d\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.109977 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-scripts\") pod \"946c0669-4c99-46b7-a9ff-437042383642\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.110005 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/1fa6d725-8054-46f1-8c0c-c693d5306563-kube-state-metrics-tls-config\") pod \"1fa6d725-8054-46f1-8c0c-c693d5306563\" 
(UID: \"1fa6d725-8054-46f1-8c0c-c693d5306563\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.110024 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-internal-tls-certs\") pod \"946c0669-4c99-46b7-a9ff-437042383642\" (UID: \"946c0669-4c99-46b7-a9ff-437042383642\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.110042 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\" (UID: \"3a952329-a8d9-432d-ac5b-d88b7e2ede6b\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.110059 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fa6d725-8054-46f1-8c0c-c693d5306563-kube-state-metrics-tls-certs\") pod \"1fa6d725-8054-46f1-8c0c-c693d5306563\" (UID: \"1fa6d725-8054-46f1-8c0c-c693d5306563\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.110461 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.110472 4857 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.110481 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.110490 4857 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7358aa80-dbe4-4a31-ad84-9dc125491046-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.112573 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89adcb9a-b993-4e60-ae3b-413bed35ae0d-kube-api-access-x4wx8" (OuterVolumeSpecName: "kube-api-access-x4wx8") pod "89adcb9a-b993-4e60-ae3b-413bed35ae0d" (UID: "89adcb9a-b993-4e60-ae3b-413bed35ae0d"). InnerVolumeSpecName "kube-api-access-x4wx8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.112893 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "7358aa80-dbe4-4a31-ad84-9dc125491046" (UID: "7358aa80-dbe4-4a31-ad84-9dc125491046"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.113059 4857 scope.go:117] "RemoveContainer" containerID="bf2b4d3b1fd6b8f1241149d7a1019420f1580f221c1eb7ad343a94db93716eb4" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.113539 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-logs" (OuterVolumeSpecName: "logs") pod "3a952329-a8d9-432d-ac5b-d88b7e2ede6b" (UID: "3a952329-a8d9-432d-ac5b-d88b7e2ede6b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.114182 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89adcb9a-b993-4e60-ae3b-413bed35ae0d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "89adcb9a-b993-4e60-ae3b-413bed35ae0d" (UID: "89adcb9a-b993-4e60-ae3b-413bed35ae0d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.114533 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1ac27d1-5ad2-40ed-af2b-18668e48ead3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e1ac27d1-5ad2-40ed-af2b-18668e48ead3" (UID: "e1ac27d1-5ad2-40ed-af2b-18668e48ead3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.115627 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1ac27d1-5ad2-40ed-af2b-18668e48ead3-kube-api-access-mqqr9" (OuterVolumeSpecName: "kube-api-access-mqqr9") pod "e1ac27d1-5ad2-40ed-af2b-18668e48ead3" (UID: "e1ac27d1-5ad2-40ed-af2b-18668e48ead3"). InnerVolumeSpecName "kube-api-access-mqqr9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.115930 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "3a952329-a8d9-432d-ac5b-d88b7e2ede6b" (UID: "3a952329-a8d9-432d-ac5b-d88b7e2ede6b"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.116133 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/946c0669-4c99-46b7-a9ff-437042383642-logs" (OuterVolumeSpecName: "logs") pod "946c0669-4c99-46b7-a9ff-437042383642" (UID: "946c0669-4c99-46b7-a9ff-437042383642"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.118175 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-scripts" (OuterVolumeSpecName: "scripts") pod "946c0669-4c99-46b7-a9ff-437042383642" (UID: "946c0669-4c99-46b7-a9ff-437042383642"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.118260 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-kube-api-access-p5876" (OuterVolumeSpecName: "kube-api-access-p5876") pod "3a952329-a8d9-432d-ac5b-d88b7e2ede6b" (UID: "3a952329-a8d9-432d-ac5b-d88b7e2ede6b"). InnerVolumeSpecName "kube-api-access-p5876". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.118460 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/946c0669-4c99-46b7-a9ff-437042383642-kube-api-access-5lbwb" (OuterVolumeSpecName: "kube-api-access-5lbwb") pod "946c0669-4c99-46b7-a9ff-437042383642" (UID: "946c0669-4c99-46b7-a9ff-437042383642"). InnerVolumeSpecName "kube-api-access-5lbwb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.119770 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.120119 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "glance") pod "3a952329-a8d9-432d-ac5b-d88b7e2ede6b" (UID: "3a952329-a8d9-432d-ac5b-d88b7e2ede6b"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.124192 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-scripts" (OuterVolumeSpecName: "scripts") pod "3a952329-a8d9-432d-ac5b-d88b7e2ede6b" (UID: "3a952329-a8d9-432d-ac5b-d88b7e2ede6b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.146163 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1fa6d725-8054-46f1-8c0c-c693d5306563-kube-api-access-zpf68" (OuterVolumeSpecName: "kube-api-access-zpf68") pod "1fa6d725-8054-46f1-8c0c-c693d5306563" (UID: "1fa6d725-8054-46f1-8c0c-c693d5306563"). InnerVolumeSpecName "kube-api-access-zpf68". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.148726 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.153153 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.165992 4857 scope.go:117] "RemoveContainer" containerID="df7bca3ad7fcc3cc2ac1df9b77614d94b72b4db2a23294091359d1e948b3577e" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.171492 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.172523 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1fa6d725-8054-46f1-8c0c-c693d5306563-kube-state-metrics-tls-config" (OuterVolumeSpecName: "kube-state-metrics-tls-config") pod "1fa6d725-8054-46f1-8c0c-c693d5306563" (UID: "1fa6d725-8054-46f1-8c0c-c693d5306563"). 
InnerVolumeSpecName "kube-state-metrics-tls-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.181052 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.191316 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glanceeef9-account-delete-chvxh" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.199590 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.210980 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-public-tls-certs\") pod \"7bee7127-9367-4882-8ab1-0493128d2641\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211019 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7bf9e28-fd40-4b0d-aac9-995eff12a115-config-data\") pod \"f7bf9e28-fd40-4b0d-aac9-995eff12a115\" (UID: \"f7bf9e28-fd40-4b0d-aac9-995eff12a115\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211049 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7bee7127-9367-4882-8ab1-0493128d2641-httpd-run\") pod \"7bee7127-9367-4882-8ab1-0493128d2641\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211066 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-combined-ca-bundle\") pod \"64da16e3-099d-4def-9656-91f40d64672f\" (UID: \"64da16e3-099d-4def-9656-91f40d64672f\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211091 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fxg8t\" (UniqueName: \"kubernetes.io/projected/bf861df0-ad6e-4a39-9932-395afa59e76d-kube-api-access-fxg8t\") pod \"bf861df0-ad6e-4a39-9932-395afa59e76d\" (UID: \"bf861df0-ad6e-4a39-9932-395afa59e76d\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211109 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-public-tls-certs\") pod \"4477c075-9151-49cc-bb52-82dc34ea46ec\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211128 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-combined-ca-bundle\") pod \"7bee7127-9367-4882-8ab1-0493128d2641\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211165 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-public-tls-certs\") pod \"0d0c82d5-b320-444c-a4d9-838ca3097157\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 
13:44:14.211189 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-config-data\") pod \"0d0c82d5-b320-444c-a4d9-838ca3097157\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211208 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-combined-ca-bundle\") pod \"4477c075-9151-49cc-bb52-82dc34ea46ec\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211274 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/30a2b522-ef43-4b0a-8215-2bb928744e00-config-data\") pod \"30a2b522-ef43-4b0a-8215-2bb928744e00\" (UID: \"30a2b522-ef43-4b0a-8215-2bb928744e00\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211292 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g7bjq\" (UniqueName: \"kubernetes.io/projected/7bee7127-9367-4882-8ab1-0493128d2641-kube-api-access-g7bjq\") pod \"7bee7127-9367-4882-8ab1-0493128d2641\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211355 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9kp9n\" (UniqueName: \"kubernetes.io/projected/f7bf9e28-fd40-4b0d-aac9-995eff12a115-kube-api-access-9kp9n\") pod \"f7bf9e28-fd40-4b0d-aac9-995eff12a115\" (UID: \"f7bf9e28-fd40-4b0d-aac9-995eff12a115\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211374 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5qjn8\" (UniqueName: \"kubernetes.io/projected/0d0c82d5-b320-444c-a4d9-838ca3097157-kube-api-access-5qjn8\") pod \"0d0c82d5-b320-444c-a4d9-838ca3097157\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211398 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7bee7127-9367-4882-8ab1-0493128d2641-logs\") pod \"7bee7127-9367-4882-8ab1-0493128d2641\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211419 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4477c075-9151-49cc-bb52-82dc34ea46ec-etc-machine-id\") pod \"4477c075-9151-49cc-bb52-82dc34ea46ec\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211437 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-internal-tls-certs\") pod \"4477c075-9151-49cc-bb52-82dc34ea46ec\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211456 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-scripts\") pod \"4477c075-9151-49cc-bb52-82dc34ea46ec\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 
13:44:14.211484 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d0c82d5-b320-444c-a4d9-838ca3097157-logs\") pod \"0d0c82d5-b320-444c-a4d9-838ca3097157\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211499 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4477c075-9151-49cc-bb52-82dc34ea46ec-logs\") pod \"4477c075-9151-49cc-bb52-82dc34ea46ec\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211514 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/64da16e3-099d-4def-9656-91f40d64672f-logs\") pod \"64da16e3-099d-4def-9656-91f40d64672f\" (UID: \"64da16e3-099d-4def-9656-91f40d64672f\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211534 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30a2b522-ef43-4b0a-8215-2bb928744e00-combined-ca-bundle\") pod \"30a2b522-ef43-4b0a-8215-2bb928744e00\" (UID: \"30a2b522-ef43-4b0a-8215-2bb928744e00\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211553 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-internal-tls-certs\") pod \"0d0c82d5-b320-444c-a4d9-838ca3097157\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211568 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-public-tls-certs\") pod \"64da16e3-099d-4def-9656-91f40d64672f\" (UID: \"64da16e3-099d-4def-9656-91f40d64672f\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211586 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pskmk\" (UniqueName: \"kubernetes.io/projected/30a2b522-ef43-4b0a-8215-2bb928744e00-kube-api-access-pskmk\") pod \"30a2b522-ef43-4b0a-8215-2bb928744e00\" (UID: \"30a2b522-ef43-4b0a-8215-2bb928744e00\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211618 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrdq7\" (UniqueName: \"kubernetes.io/projected/64da16e3-099d-4def-9656-91f40d64672f-kube-api-access-wrdq7\") pod \"64da16e3-099d-4def-9656-91f40d64672f\" (UID: \"64da16e3-099d-4def-9656-91f40d64672f\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211646 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-config-data\") pod \"64da16e3-099d-4def-9656-91f40d64672f\" (UID: \"64da16e3-099d-4def-9656-91f40d64672f\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211666 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-scripts\") pod \"7bee7127-9367-4882-8ab1-0493128d2641\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211697 4857 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-config-data\") pod \"7bee7127-9367-4882-8ab1-0493128d2641\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211725 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-config-data-custom\") pod \"4477c075-9151-49cc-bb52-82dc34ea46ec\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211741 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/30a2b522-ef43-4b0a-8215-2bb928744e00-kolla-config\") pod \"30a2b522-ef43-4b0a-8215-2bb928744e00\" (UID: \"30a2b522-ef43-4b0a-8215-2bb928744e00\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.211923 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7bf9e28-fd40-4b0d-aac9-995eff12a115-combined-ca-bundle\") pod \"f7bf9e28-fd40-4b0d-aac9-995eff12a115\" (UID: \"f7bf9e28-fd40-4b0d-aac9-995eff12a115\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.212514 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7bee7127-9367-4882-8ab1-0493128d2641-logs" (OuterVolumeSpecName: "logs") pod "7bee7127-9367-4882-8ab1-0493128d2641" (UID: "7bee7127-9367-4882-8ab1-0493128d2641"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.212644 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/30a2b522-ef43-4b0a-8215-2bb928744e00-memcached-tls-certs\") pod \"30a2b522-ef43-4b0a-8215-2bb928744e00\" (UID: \"30a2b522-ef43-4b0a-8215-2bb928744e00\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.212670 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-config-data\") pod \"4477c075-9151-49cc-bb52-82dc34ea46ec\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.212696 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-internal-tls-certs\") pod \"64da16e3-099d-4def-9656-91f40d64672f\" (UID: \"64da16e3-099d-4def-9656-91f40d64672f\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.212715 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-config-data-custom\") pod \"0d0c82d5-b320-444c-a4d9-838ca3097157\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.212760 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w6q5\" (UniqueName: \"kubernetes.io/projected/4477c075-9151-49cc-bb52-82dc34ea46ec-kube-api-access-2w6q5\") pod \"4477c075-9151-49cc-bb52-82dc34ea46ec\" (UID: \"4477c075-9151-49cc-bb52-82dc34ea46ec\") " Nov 28 13:44:14 crc 
kubenswrapper[4857]: I1128 13:44:14.212781 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"7bee7127-9367-4882-8ab1-0493128d2641\" (UID: \"7bee7127-9367-4882-8ab1-0493128d2641\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.212818 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-combined-ca-bundle\") pod \"0d0c82d5-b320-444c-a4d9-838ca3097157\" (UID: \"0d0c82d5-b320-444c-a4d9-838ca3097157\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.212835 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf861df0-ad6e-4a39-9932-395afa59e76d-operator-scripts\") pod \"bf861df0-ad6e-4a39-9932-395afa59e76d\" (UID: \"bf861df0-ad6e-4a39-9932-395afa59e76d\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.213883 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7bee7127-9367-4882-8ab1-0493128d2641-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "7bee7127-9367-4882-8ab1-0493128d2641" (UID: "7bee7127-9367-4882-8ab1-0493128d2641"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.214383 4857 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.214402 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/89adcb9a-b993-4e60-ae3b-413bed35ae0d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.214414 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.214423 4857 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/1fa6d725-8054-46f1-8c0c-c693d5306563-kube-state-metrics-tls-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.214444 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.214455 4857 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7bee7127-9367-4882-8ab1-0493128d2641-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.214729 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqqr9\" (UniqueName: \"kubernetes.io/projected/e1ac27d1-5ad2-40ed-af2b-18668e48ead3-kube-api-access-mqqr9\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.214771 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1ac27d1-5ad2-40ed-af2b-18668e48ead3-operator-scripts\") on 
node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.214784 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4wx8\" (UniqueName: \"kubernetes.io/projected/89adcb9a-b993-4e60-ae3b-413bed35ae0d-kube-api-access-x4wx8\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.214793 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.214801 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7bee7127-9367-4882-8ab1-0493128d2641-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.214811 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zpf68\" (UniqueName: \"kubernetes.io/projected/1fa6d725-8054-46f1-8c0c-c693d5306563-kube-api-access-zpf68\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.214819 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.214827 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/946c0669-4c99-46b7-a9ff-437042383642-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.214851 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5lbwb\" (UniqueName: \"kubernetes.io/projected/946c0669-4c99-46b7-a9ff-437042383642-kube-api-access-5lbwb\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.214863 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p5876\" (UniqueName: \"kubernetes.io/projected/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-kube-api-access-p5876\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.214873 4857 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7358aa80-dbe4-4a31-ad84-9dc125491046-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.221110 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.222173 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bee7127-9367-4882-8ab1-0493128d2641-kube-api-access-g7bjq" (OuterVolumeSpecName: "kube-api-access-g7bjq") pod "7bee7127-9367-4882-8ab1-0493128d2641" (UID: "7bee7127-9367-4882-8ab1-0493128d2641"). InnerVolumeSpecName "kube-api-access-g7bjq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.230158 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4477c075-9151-49cc-bb52-82dc34ea46ec-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "4477c075-9151-49cc-bb52-82dc34ea46ec" (UID: "4477c075-9151-49cc-bb52-82dc34ea46ec"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.232456 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/30a2b522-ef43-4b0a-8215-2bb928744e00-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "30a2b522-ef43-4b0a-8215-2bb928744e00" (UID: "30a2b522-ef43-4b0a-8215-2bb928744e00"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.233136 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/30a2b522-ef43-4b0a-8215-2bb928744e00-config-data" (OuterVolumeSpecName: "config-data") pod "30a2b522-ef43-4b0a-8215-2bb928744e00" (UID: "30a2b522-ef43-4b0a-8215-2bb928744e00"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.236516 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf861df0-ad6e-4a39-9932-395afa59e76d-kube-api-access-fxg8t" (OuterVolumeSpecName: "kube-api-access-fxg8t") pod "bf861df0-ad6e-4a39-9932-395afa59e76d" (UID: "bf861df0-ad6e-4a39-9932-395afa59e76d"). InnerVolumeSpecName "kube-api-access-fxg8t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.237187 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d0c82d5-b320-444c-a4d9-838ca3097157-logs" (OuterVolumeSpecName: "logs") pod "0d0c82d5-b320-444c-a4d9-838ca3097157" (UID: "0d0c82d5-b320-444c-a4d9-838ca3097157"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.237594 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf861df0-ad6e-4a39-9932-395afa59e76d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bf861df0-ad6e-4a39-9932-395afa59e76d" (UID: "bf861df0-ad6e-4a39-9932-395afa59e76d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.241853 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0d0c82d5-b320-444c-a4d9-838ca3097157" (UID: "0d0c82d5-b320-444c-a4d9-838ca3097157"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.242621 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "4477c075-9151-49cc-bb52-82dc34ea46ec" (UID: "4477c075-9151-49cc-bb52-82dc34ea46ec"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.244029 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-scripts" (OuterVolumeSpecName: "scripts") pod "4477c075-9151-49cc-bb52-82dc34ea46ec" (UID: "4477c075-9151-49cc-bb52-82dc34ea46ec"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.244820 4857 scope.go:117] "RemoveContainer" containerID="8b3ff8b7cb9bbbd5d33a06e4dc7773db3800416089a3b589b96e2930ebcb5b38" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.245309 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4477c075-9151-49cc-bb52-82dc34ea46ec-logs" (OuterVolumeSpecName: "logs") pod "4477c075-9151-49cc-bb52-82dc34ea46ec" (UID: "4477c075-9151-49cc-bb52-82dc34ea46ec"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.248433 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64da16e3-099d-4def-9656-91f40d64672f-kube-api-access-wrdq7" (OuterVolumeSpecName: "kube-api-access-wrdq7") pod "64da16e3-099d-4def-9656-91f40d64672f" (UID: "64da16e3-099d-4def-9656-91f40d64672f"). InnerVolumeSpecName "kube-api-access-wrdq7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.249596 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "7bee7127-9367-4882-8ab1-0493128d2641" (UID: "7bee7127-9367-4882-8ab1-0493128d2641"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.249820 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30a2b522-ef43-4b0a-8215-2bb928744e00-kube-api-access-pskmk" (OuterVolumeSpecName: "kube-api-access-pskmk") pod "30a2b522-ef43-4b0a-8215-2bb928744e00" (UID: "30a2b522-ef43-4b0a-8215-2bb928744e00"). InnerVolumeSpecName "kube-api-access-pskmk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.250260 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.256064 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64da16e3-099d-4def-9656-91f40d64672f-logs" (OuterVolumeSpecName: "logs") pod "64da16e3-099d-4def-9656-91f40d64672f" (UID: "64da16e3-099d-4def-9656-91f40d64672f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.256231 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d0c82d5-b320-444c-a4d9-838ca3097157-kube-api-access-5qjn8" (OuterVolumeSpecName: "kube-api-access-5qjn8") pod "0d0c82d5-b320-444c-a4d9-838ca3097157" (UID: "0d0c82d5-b320-444c-a4d9-838ca3097157"). InnerVolumeSpecName "kube-api-access-5qjn8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.259251 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-scripts" (OuterVolumeSpecName: "scripts") pod "7bee7127-9367-4882-8ab1-0493128d2641" (UID: "7bee7127-9367-4882-8ab1-0493128d2641"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.259458 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4477c075-9151-49cc-bb52-82dc34ea46ec-kube-api-access-2w6q5" (OuterVolumeSpecName: "kube-api-access-2w6q5") pod "4477c075-9151-49cc-bb52-82dc34ea46ec" (UID: "4477c075-9151-49cc-bb52-82dc34ea46ec"). InnerVolumeSpecName "kube-api-access-2w6q5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.294555 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7bf9e28-fd40-4b0d-aac9-995eff12a115-kube-api-access-9kp9n" (OuterVolumeSpecName: "kube-api-access-9kp9n") pod "f7bf9e28-fd40-4b0d-aac9-995eff12a115" (UID: "f7bf9e28-fd40-4b0d-aac9-995eff12a115"). InnerVolumeSpecName "kube-api-access-9kp9n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.297932 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286","Type":"ContainerDied","Data":"553e1bee8ec433037849576640aa295afd2274c2d101bf044e81f16bf32c1a61"} Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.298043 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.301486 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glanceeef9-account-delete-chvxh" event={"ID":"bf861df0-ad6e-4a39-9932-395afa59e76d","Type":"ContainerDied","Data":"7abe9a1c5042790105001c559890bd4e71df61c138912ae23e81360e2ec7ce21"} Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.301510 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7abe9a1c5042790105001c559890bd4e71df61c138912ae23e81360e2ec7ce21" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.301574 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glanceeef9-account-delete-chvxh" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.306093 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4477c075-9151-49cc-bb52-82dc34ea46ec","Type":"ContainerDied","Data":"8536aff5d0d5a3e5608298ae83482182f56a37bc4e05ab9d3c51b4609423a2b5"} Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.306280 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.310834 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.321310 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-combined-ca-bundle\") pod \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\" (UID: \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.321397 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/310b8699-5d0c-4cce-b8fd-90ccedc2ce85-config-data\") pod \"310b8699-5d0c-4cce-b8fd-90ccedc2ce85\" (UID: \"310b8699-5d0c-4cce-b8fd-90ccedc2ce85\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.321472 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dqckr\" (UniqueName: \"kubernetes.io/projected/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-kube-api-access-dqckr\") pod \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\" (UID: \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.321695 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-logs\") pod \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\" (UID: \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.321793 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd-config-data\") pod \"2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd\" (UID: \"2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.321865 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t5xfn\" (UniqueName: \"kubernetes.io/projected/2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd-kube-api-access-t5xfn\") pod \"2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd\" (UID: \"2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.321911 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-nova-metadata-tls-certs\") pod \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\" (UID: \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.321952 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd-combined-ca-bundle\") pod \"2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd\" (UID: \"2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.321982 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5rdhz\" (UniqueName: \"kubernetes.io/projected/310b8699-5d0c-4cce-b8fd-90ccedc2ce85-kube-api-access-5rdhz\") pod \"310b8699-5d0c-4cce-b8fd-90ccedc2ce85\" (UID: \"310b8699-5d0c-4cce-b8fd-90ccedc2ce85\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.322005 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/310b8699-5d0c-4cce-b8fd-90ccedc2ce85-combined-ca-bundle\") pod 
\"310b8699-5d0c-4cce-b8fd-90ccedc2ce85\" (UID: \"310b8699-5d0c-4cce-b8fd-90ccedc2ce85\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.322063 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-config-data\") pod \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\" (UID: \"ea2604b9-e3ca-4145-b8c3-42a9b8e3b286\") " Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.330808 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd-kube-api-access-t5xfn" (OuterVolumeSpecName: "kube-api-access-t5xfn") pod "2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd" (UID: "2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd"). InnerVolumeSpecName "kube-api-access-t5xfn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.331006 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fxg8t\" (UniqueName: \"kubernetes.io/projected/bf861df0-ad6e-4a39-9932-395afa59e76d-kube-api-access-fxg8t\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.331046 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/30a2b522-ef43-4b0a-8215-2bb928744e00-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.331064 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g7bjq\" (UniqueName: \"kubernetes.io/projected/7bee7127-9367-4882-8ab1-0493128d2641-kube-api-access-g7bjq\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.331087 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5qjn8\" (UniqueName: \"kubernetes.io/projected/0d0c82d5-b320-444c-a4d9-838ca3097157-kube-api-access-5qjn8\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.331167 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9kp9n\" (UniqueName: \"kubernetes.io/projected/f7bf9e28-fd40-4b0d-aac9-995eff12a115-kube-api-access-9kp9n\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.331183 4857 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4477c075-9151-49cc-bb52-82dc34ea46ec-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.331203 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.331218 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d0c82d5-b320-444c-a4d9-838ca3097157-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.331230 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4477c075-9151-49cc-bb52-82dc34ea46ec-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.331242 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/64da16e3-099d-4def-9656-91f40d64672f-logs\") on node \"crc\" DevicePath \"\"" 
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.331262 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pskmk\" (UniqueName: \"kubernetes.io/projected/30a2b522-ef43-4b0a-8215-2bb928744e00-kube-api-access-pskmk\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.331276 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrdq7\" (UniqueName: \"kubernetes.io/projected/64da16e3-099d-4def-9656-91f40d64672f-kube-api-access-wrdq7\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.331289 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.331303 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.331320 4857 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/30a2b522-ef43-4b0a-8215-2bb928744e00-kolla-config\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.331333 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.331346 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w6q5\" (UniqueName: \"kubernetes.io/projected/4477c075-9151-49cc-bb52-82dc34ea46ec-kube-api-access-2w6q5\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.331390 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" "
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.331409 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf861df0-ad6e-4a39-9932-395afa59e76d-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.331944 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-logs" (OuterVolumeSpecName: "logs") pod "ea2604b9-e3ca-4145-b8c3-42a9b8e3b286" (UID: "ea2604b9-e3ca-4145-b8c3-42a9b8e3b286"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.352194 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1df1224d-12ff-4f52-bdbe-533b53f8991c" path="/var/lib/kubelet/pods/1df1224d-12ff-4f52-bdbe-533b53f8991c/volumes"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.359512 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="205a6e10-4a06-4d87-b90f-5787e68be49d" path="/var/lib/kubelet/pods/205a6e10-4a06-4d87-b90f-5787e68be49d/volumes"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.365118 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-575548d9c6-4zx6z"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.365962 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b16d37a-3848-4af5-a224-1e50a611c2d7" path="/var/lib/kubelet/pods/2b16d37a-3848-4af5-a224-1e50a611c2d7/volumes"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.368451 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41687469-06d7-47ab-ad25-d32df165e1e2" path="/var/lib/kubelet/pods/41687469-06d7-47ab-ad25-d32df165e1e2/volumes"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.369555 4857 generic.go:334] "Generic (PLEG): container finished" podID="2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd" containerID="165d4ed1ed3f722b3e2c4f5769e7c056c9eafbeeed14d27d5233ab4f09a4ef4d" exitCode=0
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.369699 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.372210 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="624fc019-ce85-46f4-b6b9-fc4e5f4fdfac" path="/var/lib/kubelet/pods/624fc019-ce85-46f4-b6b9-fc4e5f4fdfac/volumes"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.373144 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86982bf5-4b00-4172-b9bc-ac852da2c721" path="/var/lib/kubelet/pods/86982bf5-4b00-4172-b9bc-ac852da2c721/volumes"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.373860 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e187383-f03d-483d-bb3a-afe69f2a9d73" path="/var/lib/kubelet/pods/8e187383-f03d-483d-bb3a-afe69f2a9d73/volumes"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.374942 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633" path="/var/lib/kubelet/pods/9ffd5c4d-7e4f-43b8-8b18-5b2a18b33633/volumes"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.380436 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a25e9cdb-52db-418e-a094-2c6e0cc860eb" path="/var/lib/kubelet/pods/a25e9cdb-52db-418e-a094-2c6e0cc860eb/volumes"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.380561 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-kube-api-access-dqckr" (OuterVolumeSpecName: "kube-api-access-dqckr") pod "ea2604b9-e3ca-4145-b8c3-42a9b8e3b286" (UID: "ea2604b9-e3ca-4145-b8c3-42a9b8e3b286"). InnerVolumeSpecName "kube-api-access-dqckr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.381087 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a53cec78-89c3-4495-8af6-4caf4f018cc1" path="/var/lib/kubelet/pods/a53cec78-89c3-4495-8af6-4caf4f018cc1/volumes"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.382843 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a81fb5f5-33d2-4da6-86a6-d2f248a3364f" path="/var/lib/kubelet/pods/a81fb5f5-33d2-4da6-86a6-d2f248a3364f/volumes"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.389029 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8bf5691-6407-4038-8a23-3d562ec05262" path="/var/lib/kubelet/pods/a8bf5691-6407-4038-8a23-3d562ec05262/volumes"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.389927 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1f7e362-6e6b-4636-b551-4533ad037811" path="/var/lib/kubelet/pods/b1f7e362-6e6b-4636-b551-4533ad037811/volumes"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.390684 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6a593cc-74b3-4a02-ba7a-f4c5d7400476" path="/var/lib/kubelet/pods/b6a593cc-74b3-4a02-ba7a-f4c5d7400476/volumes"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.392023 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7118342-937d-4707-b384-31729648d90d" path="/var/lib/kubelet/pods/b7118342-937d-4707-b384-31729648d90d/volumes"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.392792 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3ffaab3-cd89-4c3c-87fb-6862af41d2cb" path="/var/lib/kubelet/pods/d3ffaab3-cd89-4c3c-87fb-6862af41d2cb/volumes"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.393137 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/310b8699-5d0c-4cce-b8fd-90ccedc2ce85-kube-api-access-5rdhz" (OuterVolumeSpecName: "kube-api-access-5rdhz") pod "310b8699-5d0c-4cce-b8fd-90ccedc2ce85" (UID: "310b8699-5d0c-4cce-b8fd-90ccedc2ce85"). InnerVolumeSpecName "kube-api-access-5rdhz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.393624 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f411fba7-d7b2-4d97-9388-c1b6f57e8328" path="/var/lib/kubelet/pods/f411fba7-d7b2-4d97-9388-c1b6f57e8328/volumes"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.395313 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.398659 4857 generic.go:334] "Generic (PLEG): container finished" podID="310b8699-5d0c-4cce-b8fd-90ccedc2ce85" containerID="8b4e711ca231fe7452ef2052a4b12b122b8a749ee4670a4b691165e5f548e102" exitCode=0
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.398745 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.449057 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.460067 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t5xfn\" (UniqueName: \"kubernetes.io/projected/2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd-kube-api-access-t5xfn\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.460096 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5rdhz\" (UniqueName: \"kubernetes.io/projected/310b8699-5d0c-4cce-b8fd-90ccedc2ce85-kube-api-access-5rdhz\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.460108 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dqckr\" (UniqueName: \"kubernetes.io/projected/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-kube-api-access-dqckr\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.460116 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-logs\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.464636 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.469486 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.471526 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.474965 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.476878 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-856655ccc5-9fgqc"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.478670 4857 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/barbican7dd8-account-delete-jg2j5" secret="" err="secret \"galera-openstack-dockercfg-nwvzk\" not found"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.478895 4857 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/cinderd8b3-account-delete-lxwj8" secret="" err="secret \"galera-openstack-dockercfg-nwvzk\" not found"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.479329 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron5867-account-delete-58twd"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.479450 4857 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/novaapi7cc9-account-delete-qjqg5" secret="" err="secret \"galera-openstack-dockercfg-nwvzk\" not found"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.493215 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-749fd8cf96-rbd6r"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.493613 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placementcdcd-account-delete-h5qc4"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.493660 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone0c6f-account-delete-bbxfz"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.496407 4857 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/novacell032fc-account-delete-xk7xx" secret="" err="secret \"galera-openstack-dockercfg-nwvzk\" not found"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.521148 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-config-data" (OuterVolumeSpecName: "config-data") pod "946c0669-4c99-46b7-a9ff-437042383642" (UID: "946c0669-4c99-46b7-a9ff-437042383642"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.524908 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1fa6d725-8054-46f1-8c0c-c693d5306563-kube-state-metrics-tls-certs" (OuterVolumeSpecName: "kube-state-metrics-tls-certs") pod "1fa6d725-8054-46f1-8c0c-c693d5306563" (UID: "1fa6d725-8054-46f1-8c0c-c693d5306563"). InnerVolumeSpecName "kube-state-metrics-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.531892 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7bf9e28-fd40-4b0d-aac9-995eff12a115-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f7bf9e28-fd40-4b0d-aac9-995eff12a115" (UID: "f7bf9e28-fd40-4b0d-aac9-995eff12a115"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.562193 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7bf9e28-fd40-4b0d-aac9-995eff12a115-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.562226 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.562236 4857 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fa6d725-8054-46f1-8c0c-c693d5306563-kube-state-metrics-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.562246 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.562390 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4477c075-9151-49cc-bb52-82dc34ea46ec" (UID: "4477c075-9151-49cc-bb52-82dc34ea46ec"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.589825 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7bf9e28-fd40-4b0d-aac9-995eff12a115-config-data" (OuterVolumeSpecName: "config-data") pod "f7bf9e28-fd40-4b0d-aac9-995eff12a115" (UID: "f7bf9e28-fd40-4b0d-aac9-995eff12a115"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.589916 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-config-data" (OuterVolumeSpecName: "config-data") pod "3a952329-a8d9-432d-ac5b-d88b7e2ede6b" (UID: "3a952329-a8d9-432d-ac5b-d88b7e2ede6b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.627190 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3a952329-a8d9-432d-ac5b-d88b7e2ede6b" (UID: "3a952329-a8d9-432d-ac5b-d88b7e2ede6b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.628040 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1fa6d725-8054-46f1-8c0c-c693d5306563-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1fa6d725-8054-46f1-8c0c-c693d5306563" (UID: "1fa6d725-8054-46f1-8c0c-c693d5306563"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.631374 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "946c0669-4c99-46b7-a9ff-437042383642" (UID: "946c0669-4c99-46b7-a9ff-437042383642"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.631416 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30a2b522-ef43-4b0a-8215-2bb928744e00-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "30a2b522-ef43-4b0a-8215-2bb928744e00" (UID: "30a2b522-ef43-4b0a-8215-2bb928744e00"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.638390 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/310b8699-5d0c-4cce-b8fd-90ccedc2ce85-config-data" (OuterVolumeSpecName: "config-data") pod "310b8699-5d0c-4cce-b8fd-90ccedc2ce85" (UID: "310b8699-5d0c-4cce-b8fd-90ccedc2ce85"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.641303 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.663828 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.663869 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7bf9e28-fd40-4b0d-aac9-995eff12a115-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.663882 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/310b8699-5d0c-4cce-b8fd-90ccedc2ce85-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.663894 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.663906 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.663917 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.663929 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.663945 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30a2b522-ef43-4b0a-8215-2bb928744e00-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.664040 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fa6d725-8054-46f1-8c0c-c693d5306563-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.681167 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7bee7127-9367-4882-8ab1-0493128d2641" (UID: "7bee7127-9367-4882-8ab1-0493128d2641"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.685568 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0d0c82d5-b320-444c-a4d9-838ca3097157" (UID: "0d0c82d5-b320-444c-a4d9-838ca3097157"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.713924 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ea2604b9-e3ca-4145-b8c3-42a9b8e3b286" (UID: "ea2604b9-e3ca-4145-b8c3-42a9b8e3b286"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.726855 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "64da16e3-099d-4def-9656-91f40d64672f" (UID: "64da16e3-099d-4def-9656-91f40d64672f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.726989 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd-config-data" (OuterVolumeSpecName: "config-data") pod "2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd" (UID: "2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.735337 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-config-data" (OuterVolumeSpecName: "config-data") pod "0d0c82d5-b320-444c-a4d9-838ca3097157" (UID: "0d0c82d5-b320-444c-a4d9-838ca3097157"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.735992 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd" (UID: "2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.750787 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "0d0c82d5-b320-444c-a4d9-838ca3097157" (UID: "0d0c82d5-b320-444c-a4d9-838ca3097157"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.760029 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "64da16e3-099d-4def-9656-91f40d64672f" (UID: "64da16e3-099d-4def-9656-91f40d64672f"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.765051 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.765083 4857 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-public-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.765092 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.765102 4857 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-public-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.765137 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.765145 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.765154 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.765163 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.765171 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.765178 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "946c0669-4c99-46b7-a9ff-437042383642" (UID: "946c0669-4c99-46b7-a9ff-437042383642"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: E1128 13:44:14.765228 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 28 13:44:14 crc kubenswrapper[4857]: E1128 13:44:14.765271 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ad7bc32b-e1f1-4ce5-a094-56f37d676131-operator-scripts podName:ad7bc32b-e1f1-4ce5-a094-56f37d676131 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:16.765257834 +0000 UTC m=+1548.792633001 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/ad7bc32b-e1f1-4ce5-a094-56f37d676131-operator-scripts") pod "novacell032fc-account-delete-xk7xx" (UID: "ad7bc32b-e1f1-4ce5-a094-56f37d676131") : configmap "openstack-scripts" not found
Nov 28 13:44:14 crc kubenswrapper[4857]: E1128 13:44:14.765270 4857 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found
Nov 28 13:44:14 crc kubenswrapper[4857]: E1128 13:44:14.765303 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 28 13:44:14 crc kubenswrapper[4857]: E1128 13:44:14.765321 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6f75b361-6a38-42a4-971c-1b3a68a3f10f-operator-scripts podName:6f75b361-6a38-42a4-971c-1b3a68a3f10f nodeName:}" failed. No retries permitted until 2025-11-28 13:44:16.765315545 +0000 UTC m=+1548.792690712 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/6f75b361-6a38-42a4-971c-1b3a68a3f10f-operator-scripts") pod "novaapi7cc9-account-delete-qjqg5" (UID: "6f75b361-6a38-42a4-971c-1b3a68a3f10f") : configmap "openstack-scripts" not found
Nov 28 13:44:14 crc kubenswrapper[4857]: E1128 13:44:14.765509 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-config-data podName:71cc1f00-1a63-428e-8f12-2136ab077860 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:22.76549031 +0000 UTC m=+1554.792865477 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-config-data") pod "rabbitmq-cell1-server-0" (UID: "71cc1f00-1a63-428e-8f12-2136ab077860") : configmap "rabbitmq-cell1-config-data" not found
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.787026 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-config-data" (OuterVolumeSpecName: "config-data") pod "ea2604b9-e3ca-4145-b8c3-42a9b8e3b286" (UID: "ea2604b9-e3ca-4145-b8c3-42a9b8e3b286"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.787895 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-config-data" (OuterVolumeSpecName: "config-data") pod "64da16e3-099d-4def-9656-91f40d64672f" (UID: "64da16e3-099d-4def-9656-91f40d64672f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.791628 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "0d0c82d5-b320-444c-a4d9-838ca3097157" (UID: "0d0c82d5-b320-444c-a4d9-838ca3097157"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.795416 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/310b8699-5d0c-4cce-b8fd-90ccedc2ce85-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "310b8699-5d0c-4cce-b8fd-90ccedc2ce85" (UID: "310b8699-5d0c-4cce-b8fd-90ccedc2ce85"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.802120 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "946c0669-4c99-46b7-a9ff-437042383642" (UID: "946c0669-4c99-46b7-a9ff-437042383642"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.805658 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "ea2604b9-e3ca-4145-b8c3-42a9b8e3b286" (UID: "ea2604b9-e3ca-4145-b8c3-42a9b8e3b286"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.810033 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "3a952329-a8d9-432d-ac5b-d88b7e2ede6b" (UID: "3a952329-a8d9-432d-ac5b-d88b7e2ede6b"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.818342 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "4477c075-9151-49cc-bb52-82dc34ea46ec" (UID: "4477c075-9151-49cc-bb52-82dc34ea46ec"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.818399 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "4477c075-9151-49cc-bb52-82dc34ea46ec" (UID: "4477c075-9151-49cc-bb52-82dc34ea46ec"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.821831 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "64da16e3-099d-4def-9656-91f40d64672f" (UID: "64da16e3-099d-4def-9656-91f40d64672f"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.823672 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-config-data" (OuterVolumeSpecName: "config-data") pod "7bee7127-9367-4882-8ab1-0493128d2641" (UID: "7bee7127-9367-4882-8ab1-0493128d2641"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.842290 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30a2b522-ef43-4b0a-8215-2bb928744e00-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "30a2b522-ef43-4b0a-8215-2bb928744e00" (UID: "30a2b522-ef43-4b0a-8215-2bb928744e00"). InnerVolumeSpecName "memcached-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.852102 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3a952329-a8d9-432d-ac5b-d88b7e2ede6b","Type":"ContainerDied","Data":"33e7f3856876296097f9a72ec4da890288bb9eeafee3de17fdeeeafb9e5f1c6e"}
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.852252 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-575548d9c6-4zx6z" event={"ID":"0d0c82d5-b320-444c-a4d9-838ca3097157","Type":"ContainerDied","Data":"578ec44bae4cc47b2a5a3b427aa426a702302e488a3c16e25719531dba92e1ee"}
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.852365 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd","Type":"ContainerDied","Data":"165d4ed1ed3f722b3e2c4f5769e7c056c9eafbeeed14d27d5233ab4f09a4ef4d"}
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.852441 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd","Type":"ContainerDied","Data":"a811bf4cbc223421dafda56d2fed783117ed79187cf2caf13350b1a08142c53f"}
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.852571 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"f7bf9e28-fd40-4b0d-aac9-995eff12a115","Type":"ContainerDied","Data":"426fecab0952c028776039daf03243cae7cd1f7183e4fcccea5133ee27cd2596"}
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.852659 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"310b8699-5d0c-4cce-b8fd-90ccedc2ce85","Type":"ContainerDied","Data":"8b4e711ca231fe7452ef2052a4b12b122b8a749ee4670a4b691165e5f548e102"}
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.852736 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"310b8699-5d0c-4cce-b8fd-90ccedc2ce85","Type":"ContainerDied","Data":"27e13ad600a897de0257f4229b70eca91bf43c65a3f94ca9f4d18dab7e7b5c08"}
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.852833 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"30a2b522-ef43-4b0a-8215-2bb928744e00","Type":"ContainerDied","Data":"dc6b682e84c6ca942b35366616802230819d0a851390b3d3f3d081955ffc7ea0"}
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.852926 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"7bee7127-9367-4882-8ab1-0493128d2641","Type":"ContainerDied","Data":"b412b43cd790cbb5062aff33c4ac84ffd760e41e2d1023e30df25e13845e09dc"}
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.852963 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1fa6d725-8054-46f1-8c0c-c693d5306563","Type":"ContainerDied","Data":"cbd2b9e7b7d5ac247cdedd4649bdad8529cc7aa4ebacb10e1349b2c0e85bf7b0"}
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.852976 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"64da16e3-099d-4def-9656-91f40d64672f","Type":"ContainerDied","Data":"15d27efb250638fac86a83ac72b6e6b4f58a2384e37c15d20ca62c7fb2f83e07"}
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.860545 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone0c6f-account-delete-bbxfz"
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.865958 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "7bee7127-9367-4882-8ab1-0493128d2641" (UID: "7bee7127-9367-4882-8ab1-0493128d2641"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.866331 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.866367 4857 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/30a2b522-ef43-4b0a-8215-2bb928744e00-memcached-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.866382 4857 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.866395 4857 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.866408 4857 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-public-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.866421 4857 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-public-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.866434 4857 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/946c0669-4c99-46b7-a9ff-437042383642-public-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.866445 4857 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.866459 4857 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d0c82d5-b320-444c-a4d9-838ca3097157-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:14 crc kubenswrapper[4857]: I1128
13:44:14.866471 4857 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a952329-a8d9-432d-ac5b-d88b7e2ede6b-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.866480 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64da16e3-099d-4def-9656-91f40d64672f-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.866492 4857 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.866500 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bee7127-9367-4882-8ab1-0493128d2641-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.866509 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/310b8699-5d0c-4cce-b8fd-90ccedc2ce85-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:14 crc kubenswrapper[4857]: E1128 13:44:14.866572 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:14 crc kubenswrapper[4857]: E1128 13:44:14.866618 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-operator-scripts podName:24a3dca4-a3d0-479d-9be8-fb8c16f97a77 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:16.866602685 +0000 UTC m=+1548.893977852 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-operator-scripts") pod "cinderd8b3-account-delete-lxwj8" (UID: "24a3dca4-a3d0-479d-9be8-fb8c16f97a77") : configmap "openstack-scripts" not found Nov 28 13:44:14 crc kubenswrapper[4857]: E1128 13:44:14.866855 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:14 crc kubenswrapper[4857]: E1128 13:44:14.866925 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d8c0e041-9c74-4a06-a966-833e919e745a-operator-scripts podName:d8c0e041-9c74-4a06-a966-833e919e745a nodeName:}" failed. No retries permitted until 2025-11-28 13:44:16.866903383 +0000 UTC m=+1548.894278600 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/d8c0e041-9c74-4a06-a966-833e919e745a-operator-scripts") pod "barbican7dd8-account-delete-jg2j5" (UID: "d8c0e041-9c74-4a06-a966-833e919e745a") : configmap "openstack-scripts" not found Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.868972 4857 scope.go:117] "RemoveContainer" containerID="5ac6adaa76a02bc0a74df277af75098ac24f5239a90ad5f23966871efb74d2a3" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.877002 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-config-data" (OuterVolumeSpecName: "config-data") pod "4477c075-9151-49cc-bb52-82dc34ea46ec" (UID: "4477c075-9151-49cc-bb52-82dc34ea46ec"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:14 crc kubenswrapper[4857]: I1128 13:44:14.967537 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4477c075-9151-49cc-bb52-82dc34ea46ec-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: E1128 13:44:15.277977 4857 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 13:44:15 crc kubenswrapper[4857]: E1128 13:44:15.278052 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-config-data podName:cfbd0457-d459-4bf2-bdaf-8b61db5cce65 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:23.278032811 +0000 UTC m=+1555.305407988 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-config-data") pod "rabbitmq-server-0" (UID: "cfbd0457-d459-4bf2-bdaf-8b61db5cce65") : configmap "rabbitmq-config-data" not found Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.282160 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.285674 4857 scope.go:117] "RemoveContainer" containerID="2b1f1cfc83df026dae7bf7bf7c447aef3e892986be530a90b20b933b8fe1c77c" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.291292 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.316355 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-856655ccc5-9fgqc"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.342813 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-856655ccc5-9fgqc"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.349088 4857 scope.go:117] "RemoveContainer" containerID="ec5937438716528a8aa131c5d6bf8c9a57f6a24f30318571c52d136b077dfcf7" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.354176 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron5867-account-delete-58twd"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.360217 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron5867-account-delete-58twd"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.462333 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_076d849e-fd88-4add-a5f9-e45a1983a606/ovn-northd/0.log" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.462448 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.501823 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.507152 4857 scope.go:117] "RemoveContainer" containerID="e575a5748441c404e5228a5c2146f98ab1fd6c5ae67eb9523fedd879f306a6a7" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.515107 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 13:44:15 crc kubenswrapper[4857]: E1128 13:44:15.521221 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" containerID="7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:44:15 crc kubenswrapper[4857]: E1128 13:44:15.521774 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8daa77c6a75ac309fb1164e6118c9ceda262a9d1e1c41ac9229dc01914b7ee0a" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:44:15 crc kubenswrapper[4857]: E1128 13:44:15.522645 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" containerID="7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:44:15 crc kubenswrapper[4857]: E1128 13:44:15.526872 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" containerID="7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:44:15 crc kubenswrapper[4857]: E1128 13:44:15.526945 4857 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-ph2cf" podUID="c80a8609-29af-4833-856c-ee4094abcc0c" containerName="ovsdb-server" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.528282 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 13:44:15 crc kubenswrapper[4857]: E1128 13:44:15.546380 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8daa77c6a75ac309fb1164e6118c9ceda262a9d1e1c41ac9229dc01914b7ee0a" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.565906 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.567971 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/glance-default-external-api-0"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.573466 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.590230 4857 scope.go:117] "RemoveContainer" containerID="4dd7dcf6024fd47fb7c4424b294f5cadc4f936ab98e05bb09fe4f5e3d7651e94" Nov 28 13:44:15 crc kubenswrapper[4857]: E1128 13:44:15.590887 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8daa77c6a75ac309fb1164e6118c9ceda262a9d1e1c41ac9229dc01914b7ee0a" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:44:15 crc kubenswrapper[4857]: E1128 13:44:15.590953 4857 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-ph2cf" podUID="c80a8609-29af-4833-856c-ee4094abcc0c" containerName="ovs-vswitchd" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.590966 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/076d849e-fd88-4add-a5f9-e45a1983a606-combined-ca-bundle\") pod \"076d849e-fd88-4add-a5f9-e45a1983a606\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.591047 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-combined-ca-bundle\") pod \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.591103 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/076d849e-fd88-4add-a5f9-e45a1983a606-scripts\") pod \"076d849e-fd88-4add-a5f9-e45a1983a606\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.591132 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-config-data-generated\") pod \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.591163 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/076d849e-fd88-4add-a5f9-e45a1983a606-ovn-northd-tls-certs\") pod \"076d849e-fd88-4add-a5f9-e45a1983a606\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.591187 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8f9h8\" (UniqueName: \"kubernetes.io/projected/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-kube-api-access-8f9h8\") pod \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.591213 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: 
\"kubernetes.io/configmap/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-config-data-default\") pod \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.591258 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/076d849e-fd88-4add-a5f9-e45a1983a606-config\") pod \"076d849e-fd88-4add-a5f9-e45a1983a606\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.591279 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/076d849e-fd88-4add-a5f9-e45a1983a606-metrics-certs-tls-certs\") pod \"076d849e-fd88-4add-a5f9-e45a1983a606\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.591302 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-kolla-config\") pod \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.591334 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s7d42\" (UniqueName: \"kubernetes.io/projected/076d849e-fd88-4add-a5f9-e45a1983a606-kube-api-access-s7d42\") pod \"076d849e-fd88-4add-a5f9-e45a1983a606\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.591384 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/076d849e-fd88-4add-a5f9-e45a1983a606-ovn-rundir\") pod \"076d849e-fd88-4add-a5f9-e45a1983a606\" (UID: \"076d849e-fd88-4add-a5f9-e45a1983a606\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.591407 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-operator-scripts\") pod \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.591430 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.591458 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-galera-tls-certs\") pod \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\" (UID: \"7b0c1834-7ece-4d9c-9cf1-28a53aea280e\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.591890 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/076d849e-fd88-4add-a5f9-e45a1983a606-scripts" (OuterVolumeSpecName: "scripts") pod "076d849e-fd88-4add-a5f9-e45a1983a606" (UID: "076d849e-fd88-4add-a5f9-e45a1983a606"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.592116 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/076d849e-fd88-4add-a5f9-e45a1983a606-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.592933 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "7b0c1834-7ece-4d9c-9cf1-28a53aea280e" (UID: "7b0c1834-7ece-4d9c-9cf1-28a53aea280e"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.594304 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/076d849e-fd88-4add-a5f9-e45a1983a606-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "076d849e-fd88-4add-a5f9-e45a1983a606" (UID: "076d849e-fd88-4add-a5f9-e45a1983a606"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.594315 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "7b0c1834-7ece-4d9c-9cf1-28a53aea280e" (UID: "7b0c1834-7ece-4d9c-9cf1-28a53aea280e"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.595000 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7b0c1834-7ece-4d9c-9cf1-28a53aea280e" (UID: "7b0c1834-7ece-4d9c-9cf1-28a53aea280e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.596021 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "7b0c1834-7ece-4d9c-9cf1-28a53aea280e" (UID: "7b0c1834-7ece-4d9c-9cf1-28a53aea280e"). InnerVolumeSpecName "kolla-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.603730 4857 generic.go:334] "Generic (PLEG): container finished" podID="71cc1f00-1a63-428e-8f12-2136ab077860" containerID="72d325a6ac77417281a4f0e4c5deaeb2d676cf4b75f4ac8be5b905a3b744677c" exitCode=0 Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.603879 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"71cc1f00-1a63-428e-8f12-2136ab077860","Type":"ContainerDied","Data":"72d325a6ac77417281a4f0e4c5deaeb2d676cf4b75f4ac8be5b905a3b744677c"} Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.605966 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/memcached-0"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.616779 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/076d849e-fd88-4add-a5f9-e45a1983a606-config" (OuterVolumeSpecName: "config") pod "076d849e-fd88-4add-a5f9-e45a1983a606" (UID: "076d849e-fd88-4add-a5f9-e45a1983a606"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.631190 4857 generic.go:334] "Generic (PLEG): container finished" podID="7b0c1834-7ece-4d9c-9cf1-28a53aea280e" containerID="1d33095c862babd8cc0dde10c6cb07ce5a2d5f837c975bd8aca7a7cf1568e774" exitCode=0 Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.631517 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7b0c1834-7ece-4d9c-9cf1-28a53aea280e","Type":"ContainerDied","Data":"1d33095c862babd8cc0dde10c6cb07ce5a2d5f837c975bd8aca7a7cf1568e774"} Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.631775 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.631505 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-kube-api-access-8f9h8" (OuterVolumeSpecName: "kube-api-access-8f9h8") pod "7b0c1834-7ece-4d9c-9cf1-28a53aea280e" (UID: "7b0c1834-7ece-4d9c-9cf1-28a53aea280e"). InnerVolumeSpecName "kube-api-access-8f9h8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.632087 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/076d849e-fd88-4add-a5f9-e45a1983a606-kube-api-access-s7d42" (OuterVolumeSpecName: "kube-api-access-s7d42") pod "076d849e-fd88-4add-a5f9-e45a1983a606" (UID: "076d849e-fd88-4add-a5f9-e45a1983a606"). InnerVolumeSpecName "kube-api-access-s7d42". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.632546 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7b0c1834-7ece-4d9c-9cf1-28a53aea280e","Type":"ContainerDied","Data":"47b74021a1fe53b7d626c8a8aae43a33be047cff60be391678a65a393e3ab844"} Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.644820 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_076d849e-fd88-4add-a5f9-e45a1983a606/ovn-northd/0.log" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.644912 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"076d849e-fd88-4add-a5f9-e45a1983a606","Type":"ContainerDied","Data":"d8bcc929d2d6bab5c64bef5dbfc2e3fbdfbb52f64ab6c01fd92f824932851397"} Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.644881 4857 generic.go:334] "Generic (PLEG): container finished" podID="076d849e-fd88-4add-a5f9-e45a1983a606" containerID="d8bcc929d2d6bab5c64bef5dbfc2e3fbdfbb52f64ab6c01fd92f824932851397" exitCode=139 Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.644982 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.645083 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"076d849e-fd88-4add-a5f9-e45a1983a606","Type":"ContainerDied","Data":"edc48bdbd461607ee0559ddb2eb3267fcf1ec84a5868cd1943eee6b28324623c"} Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.655986 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.657987 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7b0c1834-7ece-4d9c-9cf1-28a53aea280e" (UID: "7b0c1834-7ece-4d9c-9cf1-28a53aea280e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.670897 4857 generic.go:334] "Generic (PLEG): container finished" podID="cfbd0457-d459-4bf2-bdaf-8b61db5cce65" containerID="b4dc40ec2aafb3b05e54fb73bbf1e3fb91135c9bbf7ec2c351e4ea6cea29e654" exitCode=0 Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.670957 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"cfbd0457-d459-4bf2-bdaf-8b61db5cce65","Type":"ContainerDied","Data":"b4dc40ec2aafb3b05e54fb73bbf1e3fb91135c9bbf7ec2c351e4ea6cea29e654"} Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.671347 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinderd8b3-account-delete-lxwj8" podUID="24a3dca4-a3d0-479d-9be8-fb8c16f97a77" containerName="mariadb-account-delete" containerID="cri-o://c87cac856e484a65204ac4a22fcde410a6698e64a13f6990ed0561ebfe6b4815" gracePeriod=30 Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.671498 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/novaapi7cc9-account-delete-qjqg5" podUID="6f75b361-6a38-42a4-971c-1b3a68a3f10f" containerName="mariadb-account-delete" containerID="cri-o://e0661472bb397b0a3a1dd55baa5c4817a98ad0975c18b4cb900b5650b0bc0b7a" gracePeriod=30 Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.671622 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone0c6f-account-delete-bbxfz" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.671727 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/novacell032fc-account-delete-xk7xx" podUID="ad7bc32b-e1f1-4ce5-a094-56f37d676131" containerName="mariadb-account-delete" containerID="cri-o://176a8c3598639b71724934fa1590ebaca77aa44b9b8de6fbcb3127e2a34f1547" gracePeriod=30 Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.671800 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican7dd8-account-delete-jg2j5" podUID="d8c0e041-9c74-4a06-a966-833e919e745a" containerName="mariadb-account-delete" containerID="cri-o://7b2a4e3bea8fb0cf276592ab346a1a0e736d0c8fe0be99aa787de15196b3f05e" gracePeriod=30 Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.680880 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.691066 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/076d849e-fd88-4add-a5f9-e45a1983a606-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "076d849e-fd88-4add-a5f9-e45a1983a606" (UID: "076d849e-fd88-4add-a5f9-e45a1983a606"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.695353 4857 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/076d849e-fd88-4add-a5f9-e45a1983a606-ovn-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.695382 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.695393 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/076d849e-fd88-4add-a5f9-e45a1983a606-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.695401 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.695411 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.695423 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8f9h8\" (UniqueName: \"kubernetes.io/projected/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-kube-api-access-8f9h8\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.695434 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.695444 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/076d849e-fd88-4add-a5f9-e45a1983a606-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.695455 4857 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.695466 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s7d42\" (UniqueName: \"kubernetes.io/projected/076d849e-fd88-4add-a5f9-e45a1983a606-kube-api-access-s7d42\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.695909 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "mysql-db") pod "7b0c1834-7ece-4d9c-9cf1-28a53aea280e" (UID: "7b0c1834-7ece-4d9c-9cf1-28a53aea280e"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.697179 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.700222 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.711428 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.718037 4857 scope.go:117] "RemoveContainer" containerID="e24143c91b4a17a69c27afa164bb157bee14c4f0597ed2fa5ef6a42ffe793925" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.720263 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-575548d9c6-4zx6z"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.732353 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "7b0c1834-7ece-4d9c-9cf1-28a53aea280e" (UID: "7b0c1834-7ece-4d9c-9cf1-28a53aea280e"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.732523 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-575548d9c6-4zx6z"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.740907 4857 scope.go:117] "RemoveContainer" containerID="d8e862b58223c1ae15f7828a07974724e3a49c1477b31569a8dbea821c8bc09e" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.760442 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.773932 4857 scope.go:117] "RemoveContainer" containerID="990eadd5834f267197096c5bc4a36f6e0524a5b8386ca4956f7f56c8c34c8ce5" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.774913 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.775191 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/076d849e-fd88-4add-a5f9-e45a1983a606-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "076d849e-fd88-4add-a5f9-e45a1983a606" (UID: "076d849e-fd88-4add-a5f9-e45a1983a606"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.783966 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.789325 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.791417 4857 scope.go:117] "RemoveContainer" containerID="6307f97c800ac6b026e60dbaa702231a4783caba353d39fc8956c6ce72d5e01e" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.796395 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-server-conf\") pod \"71cc1f00-1a63-428e-8f12-2136ab077860\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.796521 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-plugins\") pod \"71cc1f00-1a63-428e-8f12-2136ab077860\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.796580 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-config-data\") pod \"71cc1f00-1a63-428e-8f12-2136ab077860\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.796617 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/71cc1f00-1a63-428e-8f12-2136ab077860-pod-info\") pod \"71cc1f00-1a63-428e-8f12-2136ab077860\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.796656 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ljgc2\" (UniqueName: \"kubernetes.io/projected/71cc1f00-1a63-428e-8f12-2136ab077860-kube-api-access-ljgc2\") pod \"71cc1f00-1a63-428e-8f12-2136ab077860\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.796682 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-erlang-cookie\") pod \"71cc1f00-1a63-428e-8f12-2136ab077860\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.796707 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-confd\") pod \"71cc1f00-1a63-428e-8f12-2136ab077860\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.796784 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-tls\") pod \"71cc1f00-1a63-428e-8f12-2136ab077860\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.796811 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" 
(UniqueName: \"kubernetes.io/secret/71cc1f00-1a63-428e-8f12-2136ab077860-erlang-cookie-secret\") pod \"71cc1f00-1a63-428e-8f12-2136ab077860\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.796836 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"71cc1f00-1a63-428e-8f12-2136ab077860\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.796894 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-plugins-conf\") pod \"71cc1f00-1a63-428e-8f12-2136ab077860\" (UID: \"71cc1f00-1a63-428e-8f12-2136ab077860\") " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.797328 4857 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/076d849e-fd88-4add-a5f9-e45a1983a606-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.797362 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.797378 4857 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b0c1834-7ece-4d9c-9cf1-28a53aea280e-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.797471 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "71cc1f00-1a63-428e-8f12-2136ab077860" (UID: "71cc1f00-1a63-428e-8f12-2136ab077860"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.798652 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "71cc1f00-1a63-428e-8f12-2136ab077860" (UID: "71cc1f00-1a63-428e-8f12-2136ab077860"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.798838 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "71cc1f00-1a63-428e-8f12-2136ab077860" (UID: "71cc1f00-1a63-428e-8f12-2136ab077860"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.802957 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "71cc1f00-1a63-428e-8f12-2136ab077860" (UID: "71cc1f00-1a63-428e-8f12-2136ab077860"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.803103 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71cc1f00-1a63-428e-8f12-2136ab077860-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "71cc1f00-1a63-428e-8f12-2136ab077860" (UID: "71cc1f00-1a63-428e-8f12-2136ab077860"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.803157 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "persistence") pod "71cc1f00-1a63-428e-8f12-2136ab077860" (UID: "71cc1f00-1a63-428e-8f12-2136ab077860"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.803870 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71cc1f00-1a63-428e-8f12-2136ab077860-kube-api-access-ljgc2" (OuterVolumeSpecName: "kube-api-access-ljgc2") pod "71cc1f00-1a63-428e-8f12-2136ab077860" (UID: "71cc1f00-1a63-428e-8f12-2136ab077860"). InnerVolumeSpecName "kube-api-access-ljgc2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.804101 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-749fd8cf96-rbd6r"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.807173 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/71cc1f00-1a63-428e-8f12-2136ab077860-pod-info" (OuterVolumeSpecName: "pod-info") pod "71cc1f00-1a63-428e-8f12-2136ab077860" (UID: "71cc1f00-1a63-428e-8f12-2136ab077860"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.815767 4857 scope.go:117] "RemoveContainer" containerID="d880cc69cc93c55dd123da2ed1ba8cf195b6e491b2fba33f24d18a403279c8c6" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.822042 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.822608 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-749fd8cf96-rbd6r"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.826772 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/076d849e-fd88-4add-a5f9-e45a1983a606-ovn-northd-tls-certs" (OuterVolumeSpecName: "ovn-northd-tls-certs") pod "076d849e-fd88-4add-a5f9-e45a1983a606" (UID: "076d849e-fd88-4add-a5f9-e45a1983a606"). InnerVolumeSpecName "ovn-northd-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.828946 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-config-data" (OuterVolumeSpecName: "config-data") pod "71cc1f00-1a63-428e-8f12-2136ab077860" (UID: "71cc1f00-1a63-428e-8f12-2136ab077860"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.831027 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.845595 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.847534 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-server-conf" (OuterVolumeSpecName: "server-conf") pod "71cc1f00-1a63-428e-8f12-2136ab077860" (UID: "71cc1f00-1a63-428e-8f12-2136ab077860"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.852029 4857 scope.go:117] "RemoveContainer" containerID="165d4ed1ed3f722b3e2c4f5769e7c056c9eafbeeed14d27d5233ab4f09a4ef4d" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.854784 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.862457 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.875980 4857 scope.go:117] "RemoveContainer" containerID="165d4ed1ed3f722b3e2c4f5769e7c056c9eafbeeed14d27d5233ab4f09a4ef4d" Nov 28 13:44:15 crc kubenswrapper[4857]: E1128 13:44:15.876313 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"165d4ed1ed3f722b3e2c4f5769e7c056c9eafbeeed14d27d5233ab4f09a4ef4d\": container with ID starting with 165d4ed1ed3f722b3e2c4f5769e7c056c9eafbeeed14d27d5233ab4f09a4ef4d not found: ID does not exist" containerID="165d4ed1ed3f722b3e2c4f5769e7c056c9eafbeeed14d27d5233ab4f09a4ef4d" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.876345 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"165d4ed1ed3f722b3e2c4f5769e7c056c9eafbeeed14d27d5233ab4f09a4ef4d"} err="failed to get container status \"165d4ed1ed3f722b3e2c4f5769e7c056c9eafbeeed14d27d5233ab4f09a4ef4d\": rpc error: code = NotFound desc = could not find container \"165d4ed1ed3f722b3e2c4f5769e7c056c9eafbeeed14d27d5233ab4f09a4ef4d\": container with ID starting with 165d4ed1ed3f722b3e2c4f5769e7c056c9eafbeeed14d27d5233ab4f09a4ef4d not found: ID does not exist" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.876363 4857 scope.go:117] "RemoveContainer" containerID="af2a21437b5950c07391db6d069bd153d9b422fd5daa52cd346a87417d643f35" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.883134 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "71cc1f00-1a63-428e-8f12-2136ab077860" (UID: "71cc1f00-1a63-428e-8f12-2136ab077860"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.888644 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone0c6f-account-delete-bbxfz"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.894537 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone0c6f-account-delete-bbxfz"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.898368 4857 scope.go:117] "RemoveContainer" containerID="8b4e711ca231fe7452ef2052a4b12b122b8a749ee4670a4b691165e5f548e102" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.898903 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.898922 4857 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.898931 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.898942 4857 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/71cc1f00-1a63-428e-8f12-2136ab077860-pod-info\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.898950 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ljgc2\" (UniqueName: \"kubernetes.io/projected/71cc1f00-1a63-428e-8f12-2136ab077860-kube-api-access-ljgc2\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.898959 4857 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.898967 4857 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.898975 4857 reconciler_common.go:293] "Volume detached for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/076d849e-fd88-4add-a5f9-e45a1983a606-ovn-northd-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.898983 4857 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/71cc1f00-1a63-428e-8f12-2136ab077860-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.898990 4857 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/71cc1f00-1a63-428e-8f12-2136ab077860-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.899010 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 28 13:44:15 crc 
kubenswrapper[4857]: I1128 13:44:15.899019 4857 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.899027 4857 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/71cc1f00-1a63-428e-8f12-2136ab077860-server-conf\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.916008 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.925463 4857 scope.go:117] "RemoveContainer" containerID="8b4e711ca231fe7452ef2052a4b12b122b8a749ee4670a4b691165e5f548e102" Nov 28 13:44:15 crc kubenswrapper[4857]: E1128 13:44:15.925947 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b4e711ca231fe7452ef2052a4b12b122b8a749ee4670a4b691165e5f548e102\": container with ID starting with 8b4e711ca231fe7452ef2052a4b12b122b8a749ee4670a4b691165e5f548e102 not found: ID does not exist" containerID="8b4e711ca231fe7452ef2052a4b12b122b8a749ee4670a4b691165e5f548e102" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.926033 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b4e711ca231fe7452ef2052a4b12b122b8a749ee4670a4b691165e5f548e102"} err="failed to get container status \"8b4e711ca231fe7452ef2052a4b12b122b8a749ee4670a4b691165e5f548e102\": rpc error: code = NotFound desc = could not find container \"8b4e711ca231fe7452ef2052a4b12b122b8a749ee4670a4b691165e5f548e102\": container with ID starting with 8b4e711ca231fe7452ef2052a4b12b122b8a749ee4670a4b691165e5f548e102 not found: ID does not exist" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.926111 4857 scope.go:117] "RemoveContainer" containerID="32156962d3c5fd3e7bbc12ce4bc19050625834d7bd9f60bcb681cfb5610ca641" Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.964531 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.972259 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.987794 4857 scope.go:117] "RemoveContainer" containerID="15caeb74f903a78a3ff675fa24fc2fa63c9da6eab92af97c459eb92425c7c093" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.003742 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.003784 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kkt2b\" (UniqueName: \"kubernetes.io/projected/dab9a798-b94d-47b1-bd82-48ff5f477dc5-kube-api-access-kkt2b\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.003794 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dab9a798-b94d-47b1-bd82-48ff5f477dc5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.005540 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/ovn-northd-0"] Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.010909 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.028023 4857 scope.go:117] "RemoveContainer" containerID="361acb609316369ca05f319244bbf84ef779ab12c43ab51d140a9f1785789d5e" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.056552 4857 scope.go:117] "RemoveContainer" containerID="5293dd02b3d8cbb50029798677d596c61dae0e02fb1b0ef17359254ce5d584b6" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.072213 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.084406 4857 scope.go:117] "RemoveContainer" containerID="1146e3ec8a4d803ee31e0a88958bb4723468c3f9bc7e9a7d393734acda6d6b4a" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.117867 4857 scope.go:117] "RemoveContainer" containerID="ee074130ed95276ff4a950681c7df3344a6e4c3aa86435eb4b8f9e471126f272" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.153641 4857 scope.go:117] "RemoveContainer" containerID="1d33095c862babd8cc0dde10c6cb07ce5a2d5f837c975bd8aca7a7cf1568e774" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.170532 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.182238 4857 scope.go:117] "RemoveContainer" containerID="f3d21b615ee4666a889ddc77f9bc3da38000a4886f57b39297860f562322b2b3" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.205588 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-config-data\") pod \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.206096 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-confd\") pod \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.206133 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-pod-info\") pod \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.206188 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4dfc8\" (UniqueName: \"kubernetes.io/projected/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-kube-api-access-4dfc8\") pod \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.206223 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.206261 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: 
\"kubernetes.io/projected/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-tls\") pod \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.206290 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-plugins-conf\") pod \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.206342 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-plugins\") pod \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.206374 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-erlang-cookie-secret\") pod \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.206453 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-server-conf\") pod \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.206528 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-erlang-cookie\") pod \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\" (UID: \"cfbd0457-d459-4bf2-bdaf-8b61db5cce65\") " Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.207343 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "cfbd0457-d459-4bf2-bdaf-8b61db5cce65" (UID: "cfbd0457-d459-4bf2-bdaf-8b61db5cce65"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.207924 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "cfbd0457-d459-4bf2-bdaf-8b61db5cce65" (UID: "cfbd0457-d459-4bf2-bdaf-8b61db5cce65"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.208284 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "cfbd0457-d459-4bf2-bdaf-8b61db5cce65" (UID: "cfbd0457-d459-4bf2-bdaf-8b61db5cce65"). InnerVolumeSpecName "rabbitmq-plugins". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.231333 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "cfbd0457-d459-4bf2-bdaf-8b61db5cce65" (UID: "cfbd0457-d459-4bf2-bdaf-8b61db5cce65"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.233499 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-kube-api-access-4dfc8" (OuterVolumeSpecName: "kube-api-access-4dfc8") pod "cfbd0457-d459-4bf2-bdaf-8b61db5cce65" (UID: "cfbd0457-d459-4bf2-bdaf-8b61db5cce65"). InnerVolumeSpecName "kube-api-access-4dfc8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.236067 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "persistence") pod "cfbd0457-d459-4bf2-bdaf-8b61db5cce65" (UID: "cfbd0457-d459-4bf2-bdaf-8b61db5cce65"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.238502 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-pod-info" (OuterVolumeSpecName: "pod-info") pod "cfbd0457-d459-4bf2-bdaf-8b61db5cce65" (UID: "cfbd0457-d459-4bf2-bdaf-8b61db5cce65"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.240775 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "cfbd0457-d459-4bf2-bdaf-8b61db5cce65" (UID: "cfbd0457-d459-4bf2-bdaf-8b61db5cce65"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.241931 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-config-data" (OuterVolumeSpecName: "config-data") pod "cfbd0457-d459-4bf2-bdaf-8b61db5cce65" (UID: "cfbd0457-d459-4bf2-bdaf-8b61db5cce65"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.249970 4857 scope.go:117] "RemoveContainer" containerID="1d33095c862babd8cc0dde10c6cb07ce5a2d5f837c975bd8aca7a7cf1568e774" Nov 28 13:44:16 crc kubenswrapper[4857]: E1128 13:44:16.250475 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d33095c862babd8cc0dde10c6cb07ce5a2d5f837c975bd8aca7a7cf1568e774\": container with ID starting with 1d33095c862babd8cc0dde10c6cb07ce5a2d5f837c975bd8aca7a7cf1568e774 not found: ID does not exist" containerID="1d33095c862babd8cc0dde10c6cb07ce5a2d5f837c975bd8aca7a7cf1568e774" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.250513 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d33095c862babd8cc0dde10c6cb07ce5a2d5f837c975bd8aca7a7cf1568e774"} err="failed to get container status \"1d33095c862babd8cc0dde10c6cb07ce5a2d5f837c975bd8aca7a7cf1568e774\": rpc error: code = NotFound desc = could not find container \"1d33095c862babd8cc0dde10c6cb07ce5a2d5f837c975bd8aca7a7cf1568e774\": container with ID starting with 1d33095c862babd8cc0dde10c6cb07ce5a2d5f837c975bd8aca7a7cf1568e774 not found: ID does not exist" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.250537 4857 scope.go:117] "RemoveContainer" containerID="f3d21b615ee4666a889ddc77f9bc3da38000a4886f57b39297860f562322b2b3" Nov 28 13:44:16 crc kubenswrapper[4857]: E1128 13:44:16.250924 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f3d21b615ee4666a889ddc77f9bc3da38000a4886f57b39297860f562322b2b3\": container with ID starting with f3d21b615ee4666a889ddc77f9bc3da38000a4886f57b39297860f562322b2b3 not found: ID does not exist" containerID="f3d21b615ee4666a889ddc77f9bc3da38000a4886f57b39297860f562322b2b3" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.250950 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3d21b615ee4666a889ddc77f9bc3da38000a4886f57b39297860f562322b2b3"} err="failed to get container status \"f3d21b615ee4666a889ddc77f9bc3da38000a4886f57b39297860f562322b2b3\": rpc error: code = NotFound desc = could not find container \"f3d21b615ee4666a889ddc77f9bc3da38000a4886f57b39297860f562322b2b3\": container with ID starting with f3d21b615ee4666a889ddc77f9bc3da38000a4886f57b39297860f562322b2b3 not found: ID does not exist" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.250972 4857 scope.go:117] "RemoveContainer" containerID="0d984f57d2f9c989b335dc40eddb0295b7c07a2ff3153367f9c77e845c49ab2d" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.284262 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-server-conf" (OuterVolumeSpecName: "server-conf") pod "cfbd0457-d459-4bf2-bdaf-8b61db5cce65" (UID: "cfbd0457-d459-4bf2-bdaf-8b61db5cce65"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.297858 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "cfbd0457-d459-4bf2-bdaf-8b61db5cce65" (UID: "cfbd0457-d459-4bf2-bdaf-8b61db5cce65"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.308245 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-fernet-keys\") pod \"adfd05de-d1db-45d3-aea1-b35dc0110b71\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.308277 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-public-tls-certs\") pod \"adfd05de-d1db-45d3-aea1-b35dc0110b71\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.308294 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-credential-keys\") pod \"adfd05de-d1db-45d3-aea1-b35dc0110b71\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.308346 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-combined-ca-bundle\") pod \"adfd05de-d1db-45d3-aea1-b35dc0110b71\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.308415 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-internal-tls-certs\") pod \"adfd05de-d1db-45d3-aea1-b35dc0110b71\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.308445 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-scripts\") pod \"adfd05de-d1db-45d3-aea1-b35dc0110b71\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.308464 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hh962\" (UniqueName: \"kubernetes.io/projected/adfd05de-d1db-45d3-aea1-b35dc0110b71-kube-api-access-hh962\") pod \"adfd05de-d1db-45d3-aea1-b35dc0110b71\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.308527 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-config-data\") pod \"adfd05de-d1db-45d3-aea1-b35dc0110b71\" (UID: \"adfd05de-d1db-45d3-aea1-b35dc0110b71\") " Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.308837 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.308854 4857 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.308864 4857 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.308874 4857 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.308882 4857 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.308889 4857 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-server-conf\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.308897 4857 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.308906 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.308926 4857 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.308935 4857 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-pod-info\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.308943 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4dfc8\" (UniqueName: \"kubernetes.io/projected/cfbd0457-d459-4bf2-bdaf-8b61db5cce65-kube-api-access-4dfc8\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.313859 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "adfd05de-d1db-45d3-aea1-b35dc0110b71" (UID: "adfd05de-d1db-45d3-aea1-b35dc0110b71"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.315496 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "adfd05de-d1db-45d3-aea1-b35dc0110b71" (UID: "adfd05de-d1db-45d3-aea1-b35dc0110b71"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.318701 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-scripts" (OuterVolumeSpecName: "scripts") pod "adfd05de-d1db-45d3-aea1-b35dc0110b71" (UID: "adfd05de-d1db-45d3-aea1-b35dc0110b71"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.320111 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/adfd05de-d1db-45d3-aea1-b35dc0110b71-kube-api-access-hh962" (OuterVolumeSpecName: "kube-api-access-hh962") pod "adfd05de-d1db-45d3-aea1-b35dc0110b71" (UID: "adfd05de-d1db-45d3-aea1-b35dc0110b71"). InnerVolumeSpecName "kube-api-access-hh962". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.326804 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="076d849e-fd88-4add-a5f9-e45a1983a606" path="/var/lib/kubelet/pods/076d849e-fd88-4add-a5f9-e45a1983a606/volumes" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.329216 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d0c82d5-b320-444c-a4d9-838ca3097157" path="/var/lib/kubelet/pods/0d0c82d5-b320-444c-a4d9-838ca3097157/volumes" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.330310 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1fa6d725-8054-46f1-8c0c-c693d5306563" path="/var/lib/kubelet/pods/1fa6d725-8054-46f1-8c0c-c693d5306563/volumes" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.331330 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd" path="/var/lib/kubelet/pods/2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd/volumes" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.332620 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30a2b522-ef43-4b0a-8215-2bb928744e00" path="/var/lib/kubelet/pods/30a2b522-ef43-4b0a-8215-2bb928744e00/volumes" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.333285 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.334404 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="310b8699-5d0c-4cce-b8fd-90ccedc2ce85" path="/var/lib/kubelet/pods/310b8699-5d0c-4cce-b8fd-90ccedc2ce85/volumes" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.335131 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a952329-a8d9-432d-ac5b-d88b7e2ede6b" path="/var/lib/kubelet/pods/3a952329-a8d9-432d-ac5b-d88b7e2ede6b/volumes" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.335979 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4477c075-9151-49cc-bb52-82dc34ea46ec" path="/var/lib/kubelet/pods/4477c075-9151-49cc-bb52-82dc34ea46ec/volumes" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.337717 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64da16e3-099d-4def-9656-91f40d64672f" path="/var/lib/kubelet/pods/64da16e3-099d-4def-9656-91f40d64672f/volumes" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.338228 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-config-data" (OuterVolumeSpecName: "config-data") pod "adfd05de-d1db-45d3-aea1-b35dc0110b71" (UID: "adfd05de-d1db-45d3-aea1-b35dc0110b71"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.338410 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7358aa80-dbe4-4a31-ad84-9dc125491046" path="/var/lib/kubelet/pods/7358aa80-dbe4-4a31-ad84-9dc125491046/volumes" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.339623 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b0c1834-7ece-4d9c-9cf1-28a53aea280e" path="/var/lib/kubelet/pods/7b0c1834-7ece-4d9c-9cf1-28a53aea280e/volumes" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.340691 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bee7127-9367-4882-8ab1-0493128d2641" path="/var/lib/kubelet/pods/7bee7127-9367-4882-8ab1-0493128d2641/volumes" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.341452 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89adcb9a-b993-4e60-ae3b-413bed35ae0d" path="/var/lib/kubelet/pods/89adcb9a-b993-4e60-ae3b-413bed35ae0d/volumes" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.342634 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="946c0669-4c99-46b7-a9ff-437042383642" path="/var/lib/kubelet/pods/946c0669-4c99-46b7-a9ff-437042383642/volumes" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.344544 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dab9a798-b94d-47b1-bd82-48ff5f477dc5" path="/var/lib/kubelet/pods/dab9a798-b94d-47b1-bd82-48ff5f477dc5/volumes" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.344966 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea2604b9-e3ca-4145-b8c3-42a9b8e3b286" path="/var/lib/kubelet/pods/ea2604b9-e3ca-4145-b8c3-42a9b8e3b286/volumes" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.345643 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7bf9e28-fd40-4b0d-aac9-995eff12a115" path="/var/lib/kubelet/pods/f7bf9e28-fd40-4b0d-aac9-995eff12a115/volumes" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.347396 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "adfd05de-d1db-45d3-aea1-b35dc0110b71" (UID: "adfd05de-d1db-45d3-aea1-b35dc0110b71"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.350808 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "adfd05de-d1db-45d3-aea1-b35dc0110b71" (UID: "adfd05de-d1db-45d3-aea1-b35dc0110b71"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.378652 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "adfd05de-d1db-45d3-aea1-b35dc0110b71" (UID: "adfd05de-d1db-45d3-aea1-b35dc0110b71"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.409973 4857 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.410014 4857 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.410028 4857 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.410040 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.410050 4857 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.410060 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.410071 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hh962\" (UniqueName: \"kubernetes.io/projected/adfd05de-d1db-45d3-aea1-b35dc0110b71-kube-api-access-hh962\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.410082 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.410103 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/adfd05de-d1db-45d3-aea1-b35dc0110b71-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.419002 4857 scope.go:117] "RemoveContainer" containerID="d8bcc929d2d6bab5c64bef5dbfc2e3fbdfbb52f64ab6c01fd92f824932851397" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.434845 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-7d4894d65-gqnvs" podUID="151aff2f-7aaa-4964-8f75-51c8faf86397" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.152:9696/\": dial tcp 10.217.0.152:9696: connect: connection refused" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.447562 4857 scope.go:117] "RemoveContainer" containerID="0d984f57d2f9c989b335dc40eddb0295b7c07a2ff3153367f9c77e845c49ab2d" Nov 28 13:44:16 crc kubenswrapper[4857]: E1128 13:44:16.448017 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d984f57d2f9c989b335dc40eddb0295b7c07a2ff3153367f9c77e845c49ab2d\": container with ID starting with 0d984f57d2f9c989b335dc40eddb0295b7c07a2ff3153367f9c77e845c49ab2d not found: ID does not exist" 
containerID="0d984f57d2f9c989b335dc40eddb0295b7c07a2ff3153367f9c77e845c49ab2d" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.448053 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d984f57d2f9c989b335dc40eddb0295b7c07a2ff3153367f9c77e845c49ab2d"} err="failed to get container status \"0d984f57d2f9c989b335dc40eddb0295b7c07a2ff3153367f9c77e845c49ab2d\": rpc error: code = NotFound desc = could not find container \"0d984f57d2f9c989b335dc40eddb0295b7c07a2ff3153367f9c77e845c49ab2d\": container with ID starting with 0d984f57d2f9c989b335dc40eddb0295b7c07a2ff3153367f9c77e845c49ab2d not found: ID does not exist" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.448075 4857 scope.go:117] "RemoveContainer" containerID="d8bcc929d2d6bab5c64bef5dbfc2e3fbdfbb52f64ab6c01fd92f824932851397" Nov 28 13:44:16 crc kubenswrapper[4857]: E1128 13:44:16.451065 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d8bcc929d2d6bab5c64bef5dbfc2e3fbdfbb52f64ab6c01fd92f824932851397\": container with ID starting with d8bcc929d2d6bab5c64bef5dbfc2e3fbdfbb52f64ab6c01fd92f824932851397 not found: ID does not exist" containerID="d8bcc929d2d6bab5c64bef5dbfc2e3fbdfbb52f64ab6c01fd92f824932851397" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.451098 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8bcc929d2d6bab5c64bef5dbfc2e3fbdfbb52f64ab6c01fd92f824932851397"} err="failed to get container status \"d8bcc929d2d6bab5c64bef5dbfc2e3fbdfbb52f64ab6c01fd92f824932851397\": rpc error: code = NotFound desc = could not find container \"d8bcc929d2d6bab5c64bef5dbfc2e3fbdfbb52f64ab6c01fd92f824932851397\": container with ID starting with d8bcc929d2d6bab5c64bef5dbfc2e3fbdfbb52f64ab6c01fd92f824932851397 not found: ID does not exist" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.604629 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-r57mh"] Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.613187 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glanceeef9-account-delete-chvxh"] Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.619123 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-eef9-account-create-update-kg8k5"] Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.624190 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-r57mh"] Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.628929 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glanceeef9-account-delete-chvxh"] Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.634216 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-eef9-account-create-update-kg8k5"] Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.688640 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"cfbd0457-d459-4bf2-bdaf-8b61db5cce65","Type":"ContainerDied","Data":"d7bbbe16f23e34069c8fd52b92fbd34354c6eadcec7ef8b0cb7aeefa5a813137"} Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.688652 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.688737 4857 scope.go:117] "RemoveContainer" containerID="b4dc40ec2aafb3b05e54fb73bbf1e3fb91135c9bbf7ec2c351e4ea6cea29e654" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.696078 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"71cc1f00-1a63-428e-8f12-2136ab077860","Type":"ContainerDied","Data":"411e2fc221cd1d35da69d2b8483e64f10e30a3df064542e60d4be06817567a1c"} Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.696191 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.715371 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.723159 4857 scope.go:117] "RemoveContainer" containerID="0b4fdb93170b6f9968d2f6150fa31e0cec84ad5ec1c7df2d3bf7d8ff7467e6e7" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.727193 4857 generic.go:334] "Generic (PLEG): container finished" podID="adfd05de-d1db-45d3-aea1-b35dc0110b71" containerID="c6ba92e4d979c8b69f5fe686fd993ea41a62407d93386f6c437c35d1fe1b2018" exitCode=0 Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.727312 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5f4cb87f5f-m76pk" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.728051 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5f4cb87f5f-m76pk" event={"ID":"adfd05de-d1db-45d3-aea1-b35dc0110b71","Type":"ContainerDied","Data":"c6ba92e4d979c8b69f5fe686fd993ea41a62407d93386f6c437c35d1fe1b2018"} Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.728107 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5f4cb87f5f-m76pk" event={"ID":"adfd05de-d1db-45d3-aea1-b35dc0110b71","Type":"ContainerDied","Data":"8b9e52427797212791fc67ee21db92de7bbbcb3a3e285875fba5f906607faecb"} Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.730817 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.749434 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.755204 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.817650 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-z8bzr"] Nov 28 13:44:16 crc kubenswrapper[4857]: E1128 13:44:16.825767 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:16 crc kubenswrapper[4857]: E1128 13:44:16.825827 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:16 crc kubenswrapper[4857]: E1128 13:44:16.825841 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ad7bc32b-e1f1-4ce5-a094-56f37d676131-operator-scripts podName:ad7bc32b-e1f1-4ce5-a094-56f37d676131 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:20.825823805 +0000 UTC m=+1552.853198972 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/ad7bc32b-e1f1-4ce5-a094-56f37d676131-operator-scripts") pod "novacell032fc-account-delete-xk7xx" (UID: "ad7bc32b-e1f1-4ce5-a094-56f37d676131") : configmap "openstack-scripts" not found Nov 28 13:44:16 crc kubenswrapper[4857]: E1128 13:44:16.825906 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6f75b361-6a38-42a4-971c-1b3a68a3f10f-operator-scripts podName:6f75b361-6a38-42a4-971c-1b3a68a3f10f nodeName:}" failed. No retries permitted until 2025-11-28 13:44:20.825888197 +0000 UTC m=+1552.853263364 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/6f75b361-6a38-42a4-971c-1b3a68a3f10f-operator-scripts") pod "novaapi7cc9-account-delete-qjqg5" (UID: "6f75b361-6a38-42a4-971c-1b3a68a3f10f") : configmap "openstack-scripts" not found Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.831584 4857 scope.go:117] "RemoveContainer" containerID="72d325a6ac77417281a4f0e4c5deaeb2d676cf4b75f4ac8be5b905a3b744677c" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.838432 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-z8bzr"] Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.845047 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placementcdcd-account-delete-h5qc4"] Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.851395 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-cdcd-account-create-update-ljmhf"] Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.858484 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-5f4cb87f5f-m76pk"] Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.860074 4857 scope.go:117] "RemoveContainer" containerID="da003013615b1f7d03fb067beb76ca6840f95de6e79bbeb6ebc074ff574b4949" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.865485 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-5f4cb87f5f-m76pk"] Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.870974 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-cdcd-account-create-update-ljmhf"] Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.876924 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placementcdcd-account-delete-h5qc4"] Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.896407 4857 scope.go:117] "RemoveContainer" containerID="c6ba92e4d979c8b69f5fe686fd993ea41a62407d93386f6c437c35d1fe1b2018" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.927080 4857 scope.go:117] "RemoveContainer" containerID="c6ba92e4d979c8b69f5fe686fd993ea41a62407d93386f6c437c35d1fe1b2018" Nov 28 13:44:16 crc kubenswrapper[4857]: E1128 13:44:16.927580 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6ba92e4d979c8b69f5fe686fd993ea41a62407d93386f6c437c35d1fe1b2018\": container with ID starting with c6ba92e4d979c8b69f5fe686fd993ea41a62407d93386f6c437c35d1fe1b2018 not found: ID does not exist" containerID="c6ba92e4d979c8b69f5fe686fd993ea41a62407d93386f6c437c35d1fe1b2018" Nov 28 13:44:16 crc kubenswrapper[4857]: I1128 13:44:16.927631 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6ba92e4d979c8b69f5fe686fd993ea41a62407d93386f6c437c35d1fe1b2018"} err="failed to 
get container status \"c6ba92e4d979c8b69f5fe686fd993ea41a62407d93386f6c437c35d1fe1b2018\": rpc error: code = NotFound desc = could not find container \"c6ba92e4d979c8b69f5fe686fd993ea41a62407d93386f6c437c35d1fe1b2018\": container with ID starting with c6ba92e4d979c8b69f5fe686fd993ea41a62407d93386f6c437c35d1fe1b2018 not found: ID does not exist" Nov 28 13:44:16 crc kubenswrapper[4857]: E1128 13:44:16.927926 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:16 crc kubenswrapper[4857]: E1128 13:44:16.927943 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:16 crc kubenswrapper[4857]: E1128 13:44:16.927999 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-operator-scripts podName:24a3dca4-a3d0-479d-9be8-fb8c16f97a77 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:20.927982229 +0000 UTC m=+1552.955357406 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-operator-scripts") pod "cinderd8b3-account-delete-lxwj8" (UID: "24a3dca4-a3d0-479d-9be8-fb8c16f97a77") : configmap "openstack-scripts" not found Nov 28 13:44:16 crc kubenswrapper[4857]: E1128 13:44:16.928018 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d8c0e041-9c74-4a06-a966-833e919e745a-operator-scripts podName:d8c0e041-9c74-4a06-a966-833e919e745a nodeName:}" failed. No retries permitted until 2025-11-28 13:44:20.92801068 +0000 UTC m=+1552.955385847 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/d8c0e041-9c74-4a06-a966-833e919e745a-operator-scripts") pod "barbican7dd8-account-delete-jg2j5" (UID: "d8c0e041-9c74-4a06-a966-833e919e745a") : configmap "openstack-scripts" not found Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.134652 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.231297 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-combined-ca-bundle\") pod \"cc11fd89-0365-46e5-b8b1-48f933611ab9\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.231340 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-config-data\") pod \"cc11fd89-0365-46e5-b8b1-48f933611ab9\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.231363 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-sg-core-conf-yaml\") pod \"cc11fd89-0365-46e5-b8b1-48f933611ab9\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.231443 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-scripts\") pod \"cc11fd89-0365-46e5-b8b1-48f933611ab9\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.231471 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4h48c\" (UniqueName: \"kubernetes.io/projected/cc11fd89-0365-46e5-b8b1-48f933611ab9-kube-api-access-4h48c\") pod \"cc11fd89-0365-46e5-b8b1-48f933611ab9\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.231514 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-ceilometer-tls-certs\") pod \"cc11fd89-0365-46e5-b8b1-48f933611ab9\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.231591 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cc11fd89-0365-46e5-b8b1-48f933611ab9-run-httpd\") pod \"cc11fd89-0365-46e5-b8b1-48f933611ab9\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.231612 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cc11fd89-0365-46e5-b8b1-48f933611ab9-log-httpd\") pod \"cc11fd89-0365-46e5-b8b1-48f933611ab9\" (UID: \"cc11fd89-0365-46e5-b8b1-48f933611ab9\") " Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.232472 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc11fd89-0365-46e5-b8b1-48f933611ab9-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "cc11fd89-0365-46e5-b8b1-48f933611ab9" (UID: "cc11fd89-0365-46e5-b8b1-48f933611ab9"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.232819 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc11fd89-0365-46e5-b8b1-48f933611ab9-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "cc11fd89-0365-46e5-b8b1-48f933611ab9" (UID: "cc11fd89-0365-46e5-b8b1-48f933611ab9"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.235160 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-scripts" (OuterVolumeSpecName: "scripts") pod "cc11fd89-0365-46e5-b8b1-48f933611ab9" (UID: "cc11fd89-0365-46e5-b8b1-48f933611ab9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.235743 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc11fd89-0365-46e5-b8b1-48f933611ab9-kube-api-access-4h48c" (OuterVolumeSpecName: "kube-api-access-4h48c") pod "cc11fd89-0365-46e5-b8b1-48f933611ab9" (UID: "cc11fd89-0365-46e5-b8b1-48f933611ab9"). InnerVolumeSpecName "kube-api-access-4h48c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.273977 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "cc11fd89-0365-46e5-b8b1-48f933611ab9" (UID: "cc11fd89-0365-46e5-b8b1-48f933611ab9"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.282311 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "cc11fd89-0365-46e5-b8b1-48f933611ab9" (UID: "cc11fd89-0365-46e5-b8b1-48f933611ab9"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.301063 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cc11fd89-0365-46e5-b8b1-48f933611ab9" (UID: "cc11fd89-0365-46e5-b8b1-48f933611ab9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.315341 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-config-data" (OuterVolumeSpecName: "config-data") pod "cc11fd89-0365-46e5-b8b1-48f933611ab9" (UID: "cc11fd89-0365-46e5-b8b1-48f933611ab9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.333340 4857 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.333578 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.333644 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.333697 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4h48c\" (UniqueName: \"kubernetes.io/projected/cc11fd89-0365-46e5-b8b1-48f933611ab9-kube-api-access-4h48c\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.333797 4857 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.333853 4857 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cc11fd89-0365-46e5-b8b1-48f933611ab9-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.333903 4857 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cc11fd89-0365-46e5-b8b1-48f933611ab9-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.333962 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc11fd89-0365-46e5-b8b1-48f933611ab9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.770892 4857 generic.go:334] "Generic (PLEG): container finished" podID="cc11fd89-0365-46e5-b8b1-48f933611ab9" containerID="bcfc962c58adc4335b9f934de14d9b2330cea19877b855df32060d57a9431c59" exitCode=0 Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.770990 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cc11fd89-0365-46e5-b8b1-48f933611ab9","Type":"ContainerDied","Data":"bcfc962c58adc4335b9f934de14d9b2330cea19877b855df32060d57a9431c59"} Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.771020 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cc11fd89-0365-46e5-b8b1-48f933611ab9","Type":"ContainerDied","Data":"d30b11297e0ac5eab21bf0d14af080c711877cf97dca7a255c7c61b7980bb439"} Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.771038 4857 scope.go:117] "RemoveContainer" containerID="758611f27d908e2a9d4f2cb15d9c474f4f04bb2c788bba7c25fe962588bee8ea" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.771065 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.793230 4857 scope.go:117] "RemoveContainer" containerID="53c5151a4983e3c03ad2115ba0190cda5364aa8956976486e4a3dda5c19894bc" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.817723 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.823657 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.825342 4857 scope.go:117] "RemoveContainer" containerID="bcfc962c58adc4335b9f934de14d9b2330cea19877b855df32060d57a9431c59" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.842998 4857 scope.go:117] "RemoveContainer" containerID="3e988a9d71b894b528cad9cf749fa687e397d909171f243b57b66253d5c4fcf4" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.862330 4857 scope.go:117] "RemoveContainer" containerID="758611f27d908e2a9d4f2cb15d9c474f4f04bb2c788bba7c25fe962588bee8ea" Nov 28 13:44:17 crc kubenswrapper[4857]: E1128 13:44:17.864101 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"758611f27d908e2a9d4f2cb15d9c474f4f04bb2c788bba7c25fe962588bee8ea\": container with ID starting with 758611f27d908e2a9d4f2cb15d9c474f4f04bb2c788bba7c25fe962588bee8ea not found: ID does not exist" containerID="758611f27d908e2a9d4f2cb15d9c474f4f04bb2c788bba7c25fe962588bee8ea" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.864134 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"758611f27d908e2a9d4f2cb15d9c474f4f04bb2c788bba7c25fe962588bee8ea"} err="failed to get container status \"758611f27d908e2a9d4f2cb15d9c474f4f04bb2c788bba7c25fe962588bee8ea\": rpc error: code = NotFound desc = could not find container \"758611f27d908e2a9d4f2cb15d9c474f4f04bb2c788bba7c25fe962588bee8ea\": container with ID starting with 758611f27d908e2a9d4f2cb15d9c474f4f04bb2c788bba7c25fe962588bee8ea not found: ID does not exist" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.864158 4857 scope.go:117] "RemoveContainer" containerID="53c5151a4983e3c03ad2115ba0190cda5364aa8956976486e4a3dda5c19894bc" Nov 28 13:44:17 crc kubenswrapper[4857]: E1128 13:44:17.864432 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"53c5151a4983e3c03ad2115ba0190cda5364aa8956976486e4a3dda5c19894bc\": container with ID starting with 53c5151a4983e3c03ad2115ba0190cda5364aa8956976486e4a3dda5c19894bc not found: ID does not exist" containerID="53c5151a4983e3c03ad2115ba0190cda5364aa8956976486e4a3dda5c19894bc" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.864456 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"53c5151a4983e3c03ad2115ba0190cda5364aa8956976486e4a3dda5c19894bc"} err="failed to get container status \"53c5151a4983e3c03ad2115ba0190cda5364aa8956976486e4a3dda5c19894bc\": rpc error: code = NotFound desc = could not find container \"53c5151a4983e3c03ad2115ba0190cda5364aa8956976486e4a3dda5c19894bc\": container with ID starting with 53c5151a4983e3c03ad2115ba0190cda5364aa8956976486e4a3dda5c19894bc not found: ID does not exist" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.864472 4857 scope.go:117] "RemoveContainer" containerID="bcfc962c58adc4335b9f934de14d9b2330cea19877b855df32060d57a9431c59" Nov 28 
13:44:17 crc kubenswrapper[4857]: E1128 13:44:17.865702 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bcfc962c58adc4335b9f934de14d9b2330cea19877b855df32060d57a9431c59\": container with ID starting with bcfc962c58adc4335b9f934de14d9b2330cea19877b855df32060d57a9431c59 not found: ID does not exist" containerID="bcfc962c58adc4335b9f934de14d9b2330cea19877b855df32060d57a9431c59" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.865731 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bcfc962c58adc4335b9f934de14d9b2330cea19877b855df32060d57a9431c59"} err="failed to get container status \"bcfc962c58adc4335b9f934de14d9b2330cea19877b855df32060d57a9431c59\": rpc error: code = NotFound desc = could not find container \"bcfc962c58adc4335b9f934de14d9b2330cea19877b855df32060d57a9431c59\": container with ID starting with bcfc962c58adc4335b9f934de14d9b2330cea19877b855df32060d57a9431c59 not found: ID does not exist" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.865767 4857 scope.go:117] "RemoveContainer" containerID="3e988a9d71b894b528cad9cf749fa687e397d909171f243b57b66253d5c4fcf4" Nov 28 13:44:17 crc kubenswrapper[4857]: E1128 13:44:17.867344 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e988a9d71b894b528cad9cf749fa687e397d909171f243b57b66253d5c4fcf4\": container with ID starting with 3e988a9d71b894b528cad9cf749fa687e397d909171f243b57b66253d5c4fcf4 not found: ID does not exist" containerID="3e988a9d71b894b528cad9cf749fa687e397d909171f243b57b66253d5c4fcf4" Nov 28 13:44:17 crc kubenswrapper[4857]: I1128 13:44:17.867388 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e988a9d71b894b528cad9cf749fa687e397d909171f243b57b66253d5c4fcf4"} err="failed to get container status \"3e988a9d71b894b528cad9cf749fa687e397d909171f243b57b66253d5c4fcf4\": rpc error: code = NotFound desc = could not find container \"3e988a9d71b894b528cad9cf749fa687e397d909171f243b57b66253d5c4fcf4\": container with ID starting with 3e988a9d71b894b528cad9cf749fa687e397d909171f243b57b66253d5c4fcf4 not found: ID does not exist" Nov 28 13:44:18 crc kubenswrapper[4857]: I1128 13:44:18.331897 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6da2e15b-2a40-4bf6-8502-5ac68921b525" path="/var/lib/kubelet/pods/6da2e15b-2a40-4bf6-8502-5ac68921b525/volumes" Nov 28 13:44:18 crc kubenswrapper[4857]: I1128 13:44:18.333340 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="71cc1f00-1a63-428e-8f12-2136ab077860" path="/var/lib/kubelet/pods/71cc1f00-1a63-428e-8f12-2136ab077860/volumes" Nov 28 13:44:18 crc kubenswrapper[4857]: I1128 13:44:18.334693 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96945c0e-06fd-4880-9fc5-b1be1e15474d" path="/var/lib/kubelet/pods/96945c0e-06fd-4880-9fc5-b1be1e15474d/volumes" Nov 28 13:44:18 crc kubenswrapper[4857]: I1128 13:44:18.336832 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="adfd05de-d1db-45d3-aea1-b35dc0110b71" path="/var/lib/kubelet/pods/adfd05de-d1db-45d3-aea1-b35dc0110b71/volumes" Nov 28 13:44:18 crc kubenswrapper[4857]: I1128 13:44:18.337897 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf861df0-ad6e-4a39-9932-395afa59e76d" path="/var/lib/kubelet/pods/bf861df0-ad6e-4a39-9932-395afa59e76d/volumes" Nov 28 13:44:18 
crc kubenswrapper[4857]: I1128 13:44:18.339022 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc11fd89-0365-46e5-b8b1-48f933611ab9" path="/var/lib/kubelet/pods/cc11fd89-0365-46e5-b8b1-48f933611ab9/volumes" Nov 28 13:44:18 crc kubenswrapper[4857]: I1128 13:44:18.341863 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cfbd0457-d459-4bf2-bdaf-8b61db5cce65" path="/var/lib/kubelet/pods/cfbd0457-d459-4bf2-bdaf-8b61db5cce65/volumes" Nov 28 13:44:18 crc kubenswrapper[4857]: I1128 13:44:18.346566 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dff509a4-6719-4a14-9a20-f07a13717d90" path="/var/lib/kubelet/pods/dff509a4-6719-4a14-9a20-f07a13717d90/volumes" Nov 28 13:44:18 crc kubenswrapper[4857]: I1128 13:44:18.363504 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1ac27d1-5ad2-40ed-af2b-18668e48ead3" path="/var/lib/kubelet/pods/e1ac27d1-5ad2-40ed-af2b-18668e48ead3/volumes" Nov 28 13:44:18 crc kubenswrapper[4857]: I1128 13:44:18.365075 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f33c02be-6f4d-4b53-b1fa-4b97297bde64" path="/var/lib/kubelet/pods/f33c02be-6f4d-4b53-b1fa-4b97297bde64/volumes" Nov 28 13:44:18 crc kubenswrapper[4857]: I1128 13:44:18.606583 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-575548d9c6-4zx6z" podUID="0d0c82d5-b320-444c-a4d9-838ca3097157" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.162:9311/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 13:44:18 crc kubenswrapper[4857]: I1128 13:44:18.606613 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-575548d9c6-4zx6z" podUID="0d0c82d5-b320-444c-a4d9-838ca3097157" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.162:9311/healthcheck\": dial tcp 10.217.0.162:9311: i/o timeout" Nov 28 13:44:20 crc kubenswrapper[4857]: E1128 13:44:20.515643 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" containerID="7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:44:20 crc kubenswrapper[4857]: E1128 13:44:20.516376 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8daa77c6a75ac309fb1164e6118c9ceda262a9d1e1c41ac9229dc01914b7ee0a" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:44:20 crc kubenswrapper[4857]: E1128 13:44:20.517198 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" containerID="7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:44:20 crc kubenswrapper[4857]: E1128 13:44:20.517517 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not 
created or running: checking if PID of 7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" containerID="7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:44:20 crc kubenswrapper[4857]: E1128 13:44:20.517584 4857 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-ph2cf" podUID="c80a8609-29af-4833-856c-ee4094abcc0c" containerName="ovsdb-server" Nov 28 13:44:20 crc kubenswrapper[4857]: E1128 13:44:20.519011 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8daa77c6a75ac309fb1164e6118c9ceda262a9d1e1c41ac9229dc01914b7ee0a" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:44:20 crc kubenswrapper[4857]: E1128 13:44:20.520206 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8daa77c6a75ac309fb1164e6118c9ceda262a9d1e1c41ac9229dc01914b7ee0a" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:44:20 crc kubenswrapper[4857]: E1128 13:44:20.520247 4857 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-ph2cf" podUID="c80a8609-29af-4833-856c-ee4094abcc0c" containerName="ovs-vswitchd" Nov 28 13:44:20 crc kubenswrapper[4857]: E1128 13:44:20.888612 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:20 crc kubenswrapper[4857]: E1128 13:44:20.888704 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6f75b361-6a38-42a4-971c-1b3a68a3f10f-operator-scripts podName:6f75b361-6a38-42a4-971c-1b3a68a3f10f nodeName:}" failed. No retries permitted until 2025-11-28 13:44:28.888685947 +0000 UTC m=+1560.916061114 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/6f75b361-6a38-42a4-971c-1b3a68a3f10f-operator-scripts") pod "novaapi7cc9-account-delete-qjqg5" (UID: "6f75b361-6a38-42a4-971c-1b3a68a3f10f") : configmap "openstack-scripts" not found Nov 28 13:44:20 crc kubenswrapper[4857]: E1128 13:44:20.888629 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:20 crc kubenswrapper[4857]: E1128 13:44:20.888816 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ad7bc32b-e1f1-4ce5-a094-56f37d676131-operator-scripts podName:ad7bc32b-e1f1-4ce5-a094-56f37d676131 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:28.88880077 +0000 UTC m=+1560.916175937 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/ad7bc32b-e1f1-4ce5-a094-56f37d676131-operator-scripts") pod "novacell032fc-account-delete-xk7xx" (UID: "ad7bc32b-e1f1-4ce5-a094-56f37d676131") : configmap "openstack-scripts" not found Nov 28 13:44:20 crc kubenswrapper[4857]: E1128 13:44:20.994589 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:20 crc kubenswrapper[4857]: E1128 13:44:20.994626 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:20 crc kubenswrapper[4857]: E1128 13:44:20.994686 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d8c0e041-9c74-4a06-a966-833e919e745a-operator-scripts podName:d8c0e041-9c74-4a06-a966-833e919e745a nodeName:}" failed. No retries permitted until 2025-11-28 13:44:28.994666601 +0000 UTC m=+1561.022041768 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/d8c0e041-9c74-4a06-a966-833e919e745a-operator-scripts") pod "barbican7dd8-account-delete-jg2j5" (UID: "d8c0e041-9c74-4a06-a966-833e919e745a") : configmap "openstack-scripts" not found Nov 28 13:44:20 crc kubenswrapper[4857]: E1128 13:44:20.994707 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-operator-scripts podName:24a3dca4-a3d0-479d-9be8-fb8c16f97a77 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:28.994700472 +0000 UTC m=+1561.022075639 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-operator-scripts") pod "cinderd8b3-account-delete-lxwj8" (UID: "24a3dca4-a3d0-479d-9be8-fb8c16f97a77") : configmap "openstack-scripts" not found Nov 28 13:44:25 crc kubenswrapper[4857]: E1128 13:44:25.515796 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" containerID="7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:44:25 crc kubenswrapper[4857]: E1128 13:44:25.517204 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8daa77c6a75ac309fb1164e6118c9ceda262a9d1e1c41ac9229dc01914b7ee0a" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:44:25 crc kubenswrapper[4857]: E1128 13:44:25.520231 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" containerID="7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:44:25 crc kubenswrapper[4857]: E1128 13:44:25.520937 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 
7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" containerID="7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:44:25 crc kubenswrapper[4857]: E1128 13:44:25.521038 4857 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-ph2cf" podUID="c80a8609-29af-4833-856c-ee4094abcc0c" containerName="ovsdb-server" Nov 28 13:44:25 crc kubenswrapper[4857]: E1128 13:44:25.526192 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8daa77c6a75ac309fb1164e6118c9ceda262a9d1e1c41ac9229dc01914b7ee0a" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:44:25 crc kubenswrapper[4857]: E1128 13:44:25.529496 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8daa77c6a75ac309fb1164e6118c9ceda262a9d1e1c41ac9229dc01914b7ee0a" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:44:25 crc kubenswrapper[4857]: E1128 13:44:25.529590 4857 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-ph2cf" podUID="c80a8609-29af-4833-856c-ee4094abcc0c" containerName="ovs-vswitchd" Nov 28 13:44:26 crc kubenswrapper[4857]: I1128 13:44:26.900219 4857 generic.go:334] "Generic (PLEG): container finished" podID="151aff2f-7aaa-4964-8f75-51c8faf86397" containerID="a6df09e46a84bd7457d48eb96b91f30ec9076cb7712cd5e8e714009a5e5ee6d2" exitCode=0 Nov 28 13:44:26 crc kubenswrapper[4857]: I1128 13:44:26.900267 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7d4894d65-gqnvs" event={"ID":"151aff2f-7aaa-4964-8f75-51c8faf86397","Type":"ContainerDied","Data":"a6df09e46a84bd7457d48eb96b91f30ec9076cb7712cd5e8e714009a5e5ee6d2"} Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.092105 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.195449 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-ovndb-tls-certs\") pod \"151aff2f-7aaa-4964-8f75-51c8faf86397\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.195504 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-combined-ca-bundle\") pod \"151aff2f-7aaa-4964-8f75-51c8faf86397\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.195541 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-public-tls-certs\") pod \"151aff2f-7aaa-4964-8f75-51c8faf86397\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.195564 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-internal-tls-certs\") pod \"151aff2f-7aaa-4964-8f75-51c8faf86397\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.195599 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-config\") pod \"151aff2f-7aaa-4964-8f75-51c8faf86397\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.195640 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-httpd-config\") pod \"151aff2f-7aaa-4964-8f75-51c8faf86397\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.195697 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-849t6\" (UniqueName: \"kubernetes.io/projected/151aff2f-7aaa-4964-8f75-51c8faf86397-kube-api-access-849t6\") pod \"151aff2f-7aaa-4964-8f75-51c8faf86397\" (UID: \"151aff2f-7aaa-4964-8f75-51c8faf86397\") " Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.205224 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "151aff2f-7aaa-4964-8f75-51c8faf86397" (UID: "151aff2f-7aaa-4964-8f75-51c8faf86397"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.205425 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/151aff2f-7aaa-4964-8f75-51c8faf86397-kube-api-access-849t6" (OuterVolumeSpecName: "kube-api-access-849t6") pod "151aff2f-7aaa-4964-8f75-51c8faf86397" (UID: "151aff2f-7aaa-4964-8f75-51c8faf86397"). InnerVolumeSpecName "kube-api-access-849t6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.244193 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "151aff2f-7aaa-4964-8f75-51c8faf86397" (UID: "151aff2f-7aaa-4964-8f75-51c8faf86397"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.251627 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "151aff2f-7aaa-4964-8f75-51c8faf86397" (UID: "151aff2f-7aaa-4964-8f75-51c8faf86397"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.255639 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "151aff2f-7aaa-4964-8f75-51c8faf86397" (UID: "151aff2f-7aaa-4964-8f75-51c8faf86397"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.263218 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "151aff2f-7aaa-4964-8f75-51c8faf86397" (UID: "151aff2f-7aaa-4964-8f75-51c8faf86397"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.272463 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-config" (OuterVolumeSpecName: "config") pod "151aff2f-7aaa-4964-8f75-51c8faf86397" (UID: "151aff2f-7aaa-4964-8f75-51c8faf86397"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.297122 4857 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.297159 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.297168 4857 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.297178 4857 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.297187 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.297196 4857 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/151aff2f-7aaa-4964-8f75-51c8faf86397-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.297206 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-849t6\" (UniqueName: \"kubernetes.io/projected/151aff2f-7aaa-4964-8f75-51c8faf86397-kube-api-access-849t6\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.913636 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7d4894d65-gqnvs" event={"ID":"151aff2f-7aaa-4964-8f75-51c8faf86397","Type":"ContainerDied","Data":"142e02233648c3cfe3264c0b78da1d10fe0c212642b98af63993ac8735fc68e1"} Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.913699 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7d4894d65-gqnvs" Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.913727 4857 scope.go:117] "RemoveContainer" containerID="ec785d0624d75a82e22bd01f7edfc8b3b369f0fa8f2251c36725e8484e0c04f0" Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.949786 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7d4894d65-gqnvs"] Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.957923 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-7d4894d65-gqnvs"] Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.962531 4857 scope.go:117] "RemoveContainer" containerID="a6df09e46a84bd7457d48eb96b91f30ec9076cb7712cd5e8e714009a5e5ee6d2" Nov 28 13:44:28 crc kubenswrapper[4857]: I1128 13:44:28.318291 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="151aff2f-7aaa-4964-8f75-51c8faf86397" path="/var/lib/kubelet/pods/151aff2f-7aaa-4964-8f75-51c8faf86397/volumes" Nov 28 13:44:28 crc kubenswrapper[4857]: E1128 13:44:28.928948 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:28 crc kubenswrapper[4857]: E1128 13:44:28.929017 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ad7bc32b-e1f1-4ce5-a094-56f37d676131-operator-scripts podName:ad7bc32b-e1f1-4ce5-a094-56f37d676131 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:44.929001669 +0000 UTC m=+1576.956376836 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/ad7bc32b-e1f1-4ce5-a094-56f37d676131-operator-scripts") pod "novacell032fc-account-delete-xk7xx" (UID: "ad7bc32b-e1f1-4ce5-a094-56f37d676131") : configmap "openstack-scripts" not found Nov 28 13:44:28 crc kubenswrapper[4857]: E1128 13:44:28.928948 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:28 crc kubenswrapper[4857]: E1128 13:44:28.929107 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6f75b361-6a38-42a4-971c-1b3a68a3f10f-operator-scripts podName:6f75b361-6a38-42a4-971c-1b3a68a3f10f nodeName:}" failed. No retries permitted until 2025-11-28 13:44:44.929094261 +0000 UTC m=+1576.956469428 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/6f75b361-6a38-42a4-971c-1b3a68a3f10f-operator-scripts") pod "novaapi7cc9-account-delete-qjqg5" (UID: "6f75b361-6a38-42a4-971c-1b3a68a3f10f") : configmap "openstack-scripts" not found Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.031074 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.031159 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.031185 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d8c0e041-9c74-4a06-a966-833e919e745a-operator-scripts podName:d8c0e041-9c74-4a06-a966-833e919e745a nodeName:}" failed. No retries permitted until 2025-11-28 13:44:45.031160213 +0000 UTC m=+1577.058535410 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/d8c0e041-9c74-4a06-a966-833e919e745a-operator-scripts") pod "barbican7dd8-account-delete-jg2j5" (UID: "d8c0e041-9c74-4a06-a966-833e919e745a") : configmap "openstack-scripts" not found Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.031228 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-operator-scripts podName:24a3dca4-a3d0-479d-9be8-fb8c16f97a77 nodeName:}" failed. No retries permitted until 2025-11-28 13:44:45.031208184 +0000 UTC m=+1577.058583391 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-operator-scripts") pod "cinderd8b3-account-delete-lxwj8" (UID: "24a3dca4-a3d0-479d-9be8-fb8c16f97a77") : configmap "openstack-scripts" not found Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258017 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ws79r"] Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258371 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a952329-a8d9-432d-ac5b-d88b7e2ede6b" containerName="glance-log" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258390 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a952329-a8d9-432d-ac5b-d88b7e2ede6b" containerName="glance-log" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258410 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64da16e3-099d-4def-9656-91f40d64672f" containerName="nova-api-api" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258418 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="64da16e3-099d-4def-9656-91f40d64672f" containerName="nova-api-api" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258434 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d0c82d5-b320-444c-a4d9-838ca3097157" containerName="barbican-api-log" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258443 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d0c82d5-b320-444c-a4d9-838ca3097157" containerName="barbican-api-log" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258458 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89adcb9a-b993-4e60-ae3b-413bed35ae0d" containerName="mariadb-account-delete" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258466 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="89adcb9a-b993-4e60-ae3b-413bed35ae0d" containerName="mariadb-account-delete" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258482 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="151aff2f-7aaa-4964-8f75-51c8faf86397" containerName="neutron-api" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258489 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="151aff2f-7aaa-4964-8f75-51c8faf86397" containerName="neutron-api" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258500 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7358aa80-dbe4-4a31-ad84-9dc125491046" containerName="proxy-httpd" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258508 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7358aa80-dbe4-4a31-ad84-9dc125491046" containerName="proxy-httpd" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258520 4857 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4477c075-9151-49cc-bb52-82dc34ea46ec" containerName="cinder-api" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258527 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4477c075-9151-49cc-bb52-82dc34ea46ec" containerName="cinder-api" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258538 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd" containerName="nova-cell0-conductor-conductor" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258546 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd" containerName="nova-cell0-conductor-conductor" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258559 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="946c0669-4c99-46b7-a9ff-437042383642" containerName="placement-api" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258567 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="946c0669-4c99-46b7-a9ff-437042383642" containerName="placement-api" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258582 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71cc1f00-1a63-428e-8f12-2136ab077860" containerName="rabbitmq" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258590 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="71cc1f00-1a63-428e-8f12-2136ab077860" containerName="rabbitmq" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258601 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="adfd05de-d1db-45d3-aea1-b35dc0110b71" containerName="keystone-api" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258609 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="adfd05de-d1db-45d3-aea1-b35dc0110b71" containerName="keystone-api" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258621 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bee7127-9367-4882-8ab1-0493128d2641" containerName="glance-log" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258628 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bee7127-9367-4882-8ab1-0493128d2641" containerName="glance-log" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258640 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bee7127-9367-4882-8ab1-0493128d2641" containerName="glance-httpd" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258647 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bee7127-9367-4882-8ab1-0493128d2641" containerName="glance-httpd" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258662 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7bf9e28-fd40-4b0d-aac9-995eff12a115" containerName="nova-cell1-conductor-conductor" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258670 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7bf9e28-fd40-4b0d-aac9-995eff12a115" containerName="nova-cell1-conductor-conductor" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258679 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="076d849e-fd88-4add-a5f9-e45a1983a606" containerName="ovn-northd" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258687 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="076d849e-fd88-4add-a5f9-e45a1983a606" containerName="ovn-northd" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 
13:44:29.258699 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea2604b9-e3ca-4145-b8c3-42a9b8e3b286" containerName="nova-metadata-log" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258708 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea2604b9-e3ca-4145-b8c3-42a9b8e3b286" containerName="nova-metadata-log" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258721 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64da16e3-099d-4def-9656-91f40d64672f" containerName="nova-api-log" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258731 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="64da16e3-099d-4def-9656-91f40d64672f" containerName="nova-api-log" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258743 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b0c1834-7ece-4d9c-9cf1-28a53aea280e" containerName="galera" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258776 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b0c1834-7ece-4d9c-9cf1-28a53aea280e" containerName="galera" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258796 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71cc1f00-1a63-428e-8f12-2136ab077860" containerName="setup-container" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258806 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="71cc1f00-1a63-428e-8f12-2136ab077860" containerName="setup-container" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258820 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4477c075-9151-49cc-bb52-82dc34ea46ec" containerName="cinder-api-log" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258830 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4477c075-9151-49cc-bb52-82dc34ea46ec" containerName="cinder-api-log" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258841 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="310b8699-5d0c-4cce-b8fd-90ccedc2ce85" containerName="nova-scheduler-scheduler" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258852 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="310b8699-5d0c-4cce-b8fd-90ccedc2ce85" containerName="nova-scheduler-scheduler" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258872 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30a2b522-ef43-4b0a-8215-2bb928744e00" containerName="memcached" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258881 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="30a2b522-ef43-4b0a-8215-2bb928744e00" containerName="memcached" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258894 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfbd0457-d459-4bf2-bdaf-8b61db5cce65" containerName="setup-container" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258901 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfbd0457-d459-4bf2-bdaf-8b61db5cce65" containerName="setup-container" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258912 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a952329-a8d9-432d-ac5b-d88b7e2ede6b" containerName="glance-httpd" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258919 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a952329-a8d9-432d-ac5b-d88b7e2ede6b" containerName="glance-httpd" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 
13:44:29.258929 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1ac27d1-5ad2-40ed-af2b-18668e48ead3" containerName="mariadb-account-delete" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258937 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1ac27d1-5ad2-40ed-af2b-18668e48ead3" containerName="mariadb-account-delete" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258947 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc11fd89-0365-46e5-b8b1-48f933611ab9" containerName="ceilometer-notification-agent" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258954 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc11fd89-0365-46e5-b8b1-48f933611ab9" containerName="ceilometer-notification-agent" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258963 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf861df0-ad6e-4a39-9932-395afa59e76d" containerName="mariadb-account-delete" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258970 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf861df0-ad6e-4a39-9932-395afa59e76d" containerName="mariadb-account-delete" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.258984 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfbd0457-d459-4bf2-bdaf-8b61db5cce65" containerName="rabbitmq" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.258991 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfbd0457-d459-4bf2-bdaf-8b61db5cce65" containerName="rabbitmq" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.259003 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d0c82d5-b320-444c-a4d9-838ca3097157" containerName="barbican-api" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259011 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d0c82d5-b320-444c-a4d9-838ca3097157" containerName="barbican-api" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.259022 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1fa6d725-8054-46f1-8c0c-c693d5306563" containerName="kube-state-metrics" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259031 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="1fa6d725-8054-46f1-8c0c-c693d5306563" containerName="kube-state-metrics" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.259049 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b0c1834-7ece-4d9c-9cf1-28a53aea280e" containerName="mysql-bootstrap" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259057 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b0c1834-7ece-4d9c-9cf1-28a53aea280e" containerName="mysql-bootstrap" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.259069 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc11fd89-0365-46e5-b8b1-48f933611ab9" containerName="sg-core" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259076 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc11fd89-0365-46e5-b8b1-48f933611ab9" containerName="sg-core" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.259089 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="076d849e-fd88-4add-a5f9-e45a1983a606" containerName="openstack-network-exporter" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259097 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="076d849e-fd88-4add-a5f9-e45a1983a606" 
containerName="openstack-network-exporter" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.259111 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="151aff2f-7aaa-4964-8f75-51c8faf86397" containerName="neutron-httpd" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259119 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="151aff2f-7aaa-4964-8f75-51c8faf86397" containerName="neutron-httpd" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.259130 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="946c0669-4c99-46b7-a9ff-437042383642" containerName="placement-log" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259138 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="946c0669-4c99-46b7-a9ff-437042383642" containerName="placement-log" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.259149 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc11fd89-0365-46e5-b8b1-48f933611ab9" containerName="proxy-httpd" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259156 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc11fd89-0365-46e5-b8b1-48f933611ab9" containerName="proxy-httpd" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.259166 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc11fd89-0365-46e5-b8b1-48f933611ab9" containerName="ceilometer-central-agent" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259173 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc11fd89-0365-46e5-b8b1-48f933611ab9" containerName="ceilometer-central-agent" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.259186 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea2604b9-e3ca-4145-b8c3-42a9b8e3b286" containerName="nova-metadata-metadata" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259194 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea2604b9-e3ca-4145-b8c3-42a9b8e3b286" containerName="nova-metadata-metadata" Nov 28 13:44:29 crc kubenswrapper[4857]: E1128 13:44:29.259207 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7358aa80-dbe4-4a31-ad84-9dc125491046" containerName="proxy-server" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259214 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7358aa80-dbe4-4a31-ad84-9dc125491046" containerName="proxy-server" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259424 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="151aff2f-7aaa-4964-8f75-51c8faf86397" containerName="neutron-api" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259445 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b0c1834-7ece-4d9c-9cf1-28a53aea280e" containerName="galera" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259461 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="30a2b522-ef43-4b0a-8215-2bb928744e00" containerName="memcached" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259471 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="71cc1f00-1a63-428e-8f12-2136ab077860" containerName="rabbitmq" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259488 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bee7127-9367-4882-8ab1-0493128d2641" containerName="glance-log" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259502 4857 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="cc11fd89-0365-46e5-b8b1-48f933611ab9" containerName="proxy-httpd" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259521 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a952329-a8d9-432d-ac5b-d88b7e2ede6b" containerName="glance-log" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259537 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d0c82d5-b320-444c-a4d9-838ca3097157" containerName="barbican-api-log" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259552 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7358aa80-dbe4-4a31-ad84-9dc125491046" containerName="proxy-server" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259567 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="64da16e3-099d-4def-9656-91f40d64672f" containerName="nova-api-log" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259577 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="310b8699-5d0c-4cce-b8fd-90ccedc2ce85" containerName="nova-scheduler-scheduler" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259591 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a952329-a8d9-432d-ac5b-d88b7e2ede6b" containerName="glance-httpd" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259606 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea2604b9-e3ca-4145-b8c3-42a9b8e3b286" containerName="nova-metadata-log" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259617 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc11fd89-0365-46e5-b8b1-48f933611ab9" containerName="ceilometer-notification-agent" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259630 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="1fa6d725-8054-46f1-8c0c-c693d5306563" containerName="kube-state-metrics" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259641 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="adfd05de-d1db-45d3-aea1-b35dc0110b71" containerName="keystone-api" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259655 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d0c82d5-b320-444c-a4d9-838ca3097157" containerName="barbican-api" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259667 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1ac27d1-5ad2-40ed-af2b-18668e48ead3" containerName="mariadb-account-delete" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259686 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc11fd89-0365-46e5-b8b1-48f933611ab9" containerName="sg-core" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259701 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea2604b9-e3ca-4145-b8c3-42a9b8e3b286" containerName="nova-metadata-metadata" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259710 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfbd0457-d459-4bf2-bdaf-8b61db5cce65" containerName="rabbitmq" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259725 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf861df0-ad6e-4a39-9932-395afa59e76d" containerName="mariadb-account-delete" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259736 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="64da16e3-099d-4def-9656-91f40d64672f" containerName="nova-api-api" Nov 28 13:44:29 
crc kubenswrapper[4857]: I1128 13:44:29.259777 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="89adcb9a-b993-4e60-ae3b-413bed35ae0d" containerName="mariadb-account-delete" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259797 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="076d849e-fd88-4add-a5f9-e45a1983a606" containerName="ovn-northd" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259814 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="4477c075-9151-49cc-bb52-82dc34ea46ec" containerName="cinder-api-log" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259833 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="076d849e-fd88-4add-a5f9-e45a1983a606" containerName="openstack-network-exporter" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259848 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="151aff2f-7aaa-4964-8f75-51c8faf86397" containerName="neutron-httpd" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259860 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bee7127-9367-4882-8ab1-0493128d2641" containerName="glance-httpd" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259901 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="946c0669-4c99-46b7-a9ff-437042383642" containerName="placement-api" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259917 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="4477c075-9151-49cc-bb52-82dc34ea46ec" containerName="cinder-api" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259931 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7bf9e28-fd40-4b0d-aac9-995eff12a115" containerName="nova-cell1-conductor-conductor" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259944 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc11fd89-0365-46e5-b8b1-48f933611ab9" containerName="ceilometer-central-agent" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259957 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7358aa80-dbe4-4a31-ad84-9dc125491046" containerName="proxy-httpd" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259970 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="946c0669-4c99-46b7-a9ff-437042383642" containerName="placement-log" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.259980 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2bfec6cb-0f61-4d34-a40d-f29cc5dc10cd" containerName="nova-cell0-conductor-conductor" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.262008 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ws79r" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.279491 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ws79r"] Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.334819 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9s5tg\" (UniqueName: \"kubernetes.io/projected/bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9-kube-api-access-9s5tg\") pod \"certified-operators-ws79r\" (UID: \"bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9\") " pod="openshift-marketplace/certified-operators-ws79r" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.334931 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9-utilities\") pod \"certified-operators-ws79r\" (UID: \"bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9\") " pod="openshift-marketplace/certified-operators-ws79r" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.335019 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9-catalog-content\") pod \"certified-operators-ws79r\" (UID: \"bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9\") " pod="openshift-marketplace/certified-operators-ws79r" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.436733 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9-utilities\") pod \"certified-operators-ws79r\" (UID: \"bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9\") " pod="openshift-marketplace/certified-operators-ws79r" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.436817 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9-catalog-content\") pod \"certified-operators-ws79r\" (UID: \"bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9\") " pod="openshift-marketplace/certified-operators-ws79r" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.436938 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9s5tg\" (UniqueName: \"kubernetes.io/projected/bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9-kube-api-access-9s5tg\") pod \"certified-operators-ws79r\" (UID: \"bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9\") " pod="openshift-marketplace/certified-operators-ws79r" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.437441 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9-utilities\") pod \"certified-operators-ws79r\" (UID: \"bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9\") " pod="openshift-marketplace/certified-operators-ws79r" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.437995 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9-catalog-content\") pod \"certified-operators-ws79r\" (UID: \"bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9\") " pod="openshift-marketplace/certified-operators-ws79r" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.456848 4857 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-9s5tg\" (UniqueName: \"kubernetes.io/projected/bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9-kube-api-access-9s5tg\") pod \"certified-operators-ws79r\" (UID: \"bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9\") " pod="openshift-marketplace/certified-operators-ws79r" Nov 28 13:44:29 crc kubenswrapper[4857]: I1128 13:44:29.584271 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ws79r" Nov 28 13:44:30 crc kubenswrapper[4857]: I1128 13:44:30.055545 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ws79r"] Nov 28 13:44:30 crc kubenswrapper[4857]: E1128 13:44:30.516980 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" containerID="7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:44:30 crc kubenswrapper[4857]: E1128 13:44:30.517695 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8daa77c6a75ac309fb1164e6118c9ceda262a9d1e1c41ac9229dc01914b7ee0a" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:44:30 crc kubenswrapper[4857]: E1128 13:44:30.518033 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" containerID="7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:44:30 crc kubenswrapper[4857]: E1128 13:44:30.518574 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" containerID="7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:44:30 crc kubenswrapper[4857]: E1128 13:44:30.518646 4857 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-ph2cf" podUID="c80a8609-29af-4833-856c-ee4094abcc0c" containerName="ovsdb-server" Nov 28 13:44:30 crc kubenswrapper[4857]: E1128 13:44:30.520153 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8daa77c6a75ac309fb1164e6118c9ceda262a9d1e1c41ac9229dc01914b7ee0a" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:44:30 crc kubenswrapper[4857]: E1128 13:44:30.522371 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: 
container is stopping, stdout: , stderr: , exit code -1" containerID="8daa77c6a75ac309fb1164e6118c9ceda262a9d1e1c41ac9229dc01914b7ee0a" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:44:30 crc kubenswrapper[4857]: E1128 13:44:30.522432 4857 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-ph2cf" podUID="c80a8609-29af-4833-856c-ee4094abcc0c" containerName="ovs-vswitchd" Nov 28 13:44:30 crc kubenswrapper[4857]: I1128 13:44:30.946386 4857 generic.go:334] "Generic (PLEG): container finished" podID="bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9" containerID="4b6ab22beccd7ebed0cb247e67acec38f57a9a5fc21c45a9dbe4f6611c070ce6" exitCode=0 Nov 28 13:44:30 crc kubenswrapper[4857]: I1128 13:44:30.946462 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ws79r" event={"ID":"bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9","Type":"ContainerDied","Data":"4b6ab22beccd7ebed0cb247e67acec38f57a9a5fc21c45a9dbe4f6611c070ce6"} Nov 28 13:44:30 crc kubenswrapper[4857]: I1128 13:44:30.946528 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ws79r" event={"ID":"bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9","Type":"ContainerStarted","Data":"d5928abcb16a238cd7a14f9a1a2ff57e074102876231f034473d503918ab23e0"} Nov 28 13:44:31 crc kubenswrapper[4857]: I1128 13:44:31.957798 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ws79r" event={"ID":"bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9","Type":"ContainerStarted","Data":"26a175955d7ba4f07400ba9d7cb332ee04810b059ce8e8148aeaaac7fdafb022"} Nov 28 13:44:32 crc kubenswrapper[4857]: I1128 13:44:32.970581 4857 generic.go:334] "Generic (PLEG): container finished" podID="bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9" containerID="26a175955d7ba4f07400ba9d7cb332ee04810b059ce8e8148aeaaac7fdafb022" exitCode=0 Nov 28 13:44:32 crc kubenswrapper[4857]: I1128 13:44:32.970634 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ws79r" event={"ID":"bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9","Type":"ContainerDied","Data":"26a175955d7ba4f07400ba9d7cb332ee04810b059ce8e8148aeaaac7fdafb022"} Nov 28 13:44:33 crc kubenswrapper[4857]: I1128 13:44:33.177788 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:44:33 crc kubenswrapper[4857]: I1128 13:44:33.177857 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:44:33 crc kubenswrapper[4857]: I1128 13:44:33.177931 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" Nov 28 13:44:33 crc kubenswrapper[4857]: I1128 13:44:33.178673 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7"} pod="openshift-machine-config-operator/machine-config-daemon-jdgls" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 13:44:33 crc kubenswrapper[4857]: I1128 13:44:33.178776 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" containerID="cri-o://aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7" gracePeriod=600 Nov 28 13:44:33 crc kubenswrapper[4857]: E1128 13:44:33.300458 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" Nov 28 13:44:33 crc kubenswrapper[4857]: I1128 13:44:33.982367 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ws79r" event={"ID":"bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9","Type":"ContainerStarted","Data":"96cbcf0fddb176c928178baf9f24a83a10e99f950262eb47b1421bf8da25e8bf"} Nov 28 13:44:33 crc kubenswrapper[4857]: I1128 13:44:33.986213 4857 generic.go:334] "Generic (PLEG): container finished" podID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7" exitCode=0 Nov 28 13:44:33 crc kubenswrapper[4857]: I1128 13:44:33.986256 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" event={"ID":"aba2e99a-c0de-4ae5-b347-de1565fd9d68","Type":"ContainerDied","Data":"aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7"} Nov 28 13:44:33 crc kubenswrapper[4857]: I1128 13:44:33.986287 4857 scope.go:117] "RemoveContainer" containerID="e3860c9cd9dfa55680c98f69cece6eff0f08ced38d345f3573b02bd062397f7a" Nov 28 13:44:33 crc kubenswrapper[4857]: I1128 13:44:33.986912 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7" Nov 28 13:44:33 crc kubenswrapper[4857]: E1128 13:44:33.987189 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" Nov 28 13:44:34 crc kubenswrapper[4857]: I1128 13:44:34.002485 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ws79r" podStartSLOduration=2.443813892 podStartE2EDuration="5.00246936s" podCreationTimestamp="2025-11-28 13:44:29 +0000 UTC" firstStartedPulling="2025-11-28 13:44:30.949789201 +0000 UTC m=+1562.977164408" lastFinishedPulling="2025-11-28 13:44:33.508444709 +0000 UTC m=+1565.535819876" observedRunningTime="2025-11-28 13:44:33.999384961 +0000 UTC m=+1566.026760128" watchObservedRunningTime="2025-11-28 13:44:34.00246936 +0000 UTC m=+1566.029844527" 
Nov 28 13:44:35 crc kubenswrapper[4857]: E1128 13:44:35.515317 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" containerID="7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:44:35 crc kubenswrapper[4857]: E1128 13:44:35.517022 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8daa77c6a75ac309fb1164e6118c9ceda262a9d1e1c41ac9229dc01914b7ee0a" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:44:35 crc kubenswrapper[4857]: E1128 13:44:35.517045 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" containerID="7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:44:35 crc kubenswrapper[4857]: E1128 13:44:35.517896 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" containerID="7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:44:35 crc kubenswrapper[4857]: E1128 13:44:35.517972 4857 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-ph2cf" podUID="c80a8609-29af-4833-856c-ee4094abcc0c" containerName="ovsdb-server" Nov 28 13:44:35 crc kubenswrapper[4857]: E1128 13:44:35.518898 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8daa77c6a75ac309fb1164e6118c9ceda262a9d1e1c41ac9229dc01914b7ee0a" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:44:35 crc kubenswrapper[4857]: E1128 13:44:35.520092 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8daa77c6a75ac309fb1164e6118c9ceda262a9d1e1c41ac9229dc01914b7ee0a" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:44:35 crc kubenswrapper[4857]: E1128 13:44:35.520135 4857 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-ph2cf" podUID="c80a8609-29af-4833-856c-ee4094abcc0c" containerName="ovs-vswitchd" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.046202 4857 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack_ovn-controller-ovs-ph2cf_c80a8609-29af-4833-856c-ee4094abcc0c/ovs-vswitchd/0.log" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.050989 4857 generic.go:334] "Generic (PLEG): container finished" podID="c80a8609-29af-4833-856c-ee4094abcc0c" containerID="8daa77c6a75ac309fb1164e6118c9ceda262a9d1e1c41ac9229dc01914b7ee0a" exitCode=137 Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.051064 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-ph2cf" event={"ID":"c80a8609-29af-4833-856c-ee4094abcc0c","Type":"ContainerDied","Data":"8daa77c6a75ac309fb1164e6118c9ceda262a9d1e1c41ac9229dc01914b7ee0a"} Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.058901 4857 generic.go:334] "Generic (PLEG): container finished" podID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerID="7b7ec0a8594688d16e29d5dbc41ca846eb39e0c6507989d1e1ae16d15e9b54b4" exitCode=137 Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.058961 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerDied","Data":"7b7ec0a8594688d16e29d5dbc41ca846eb39e0c6507989d1e1ae16d15e9b54b4"} Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.100692 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-ph2cf_c80a8609-29af-4833-856c-ee4094abcc0c/ovs-vswitchd/0.log" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.101683 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.112225 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.130358 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-var-log\") pod \"c80a8609-29af-4833-856c-ee4094abcc0c\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.130403 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-var-lib\") pod \"c80a8609-29af-4833-856c-ee4094abcc0c\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.130423 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"d5b28d1e-e702-4528-9964-72ad176a20b3\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.130453 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c80a8609-29af-4833-856c-ee4094abcc0c-scripts\") pod \"c80a8609-29af-4833-856c-ee4094abcc0c\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.130521 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-etc-ovs\") pod \"c80a8609-29af-4833-856c-ee4094abcc0c\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 
13:44:39.130544 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-86j52\" (UniqueName: \"kubernetes.io/projected/c80a8609-29af-4833-856c-ee4094abcc0c-kube-api-access-86j52\") pod \"c80a8609-29af-4833-856c-ee4094abcc0c\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.130591 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/d5b28d1e-e702-4528-9964-72ad176a20b3-lock\") pod \"d5b28d1e-e702-4528-9964-72ad176a20b3\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.130616 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wn8tz\" (UniqueName: \"kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-kube-api-access-wn8tz\") pod \"d5b28d1e-e702-4528-9964-72ad176a20b3\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.130637 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/d5b28d1e-e702-4528-9964-72ad176a20b3-cache\") pod \"d5b28d1e-e702-4528-9964-72ad176a20b3\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.130668 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-var-run\") pod \"c80a8609-29af-4833-856c-ee4094abcc0c\" (UID: \"c80a8609-29af-4833-856c-ee4094abcc0c\") " Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.130692 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-etc-swift\") pod \"d5b28d1e-e702-4528-9964-72ad176a20b3\" (UID: \"d5b28d1e-e702-4528-9964-72ad176a20b3\") " Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.130849 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-etc-ovs" (OuterVolumeSpecName: "etc-ovs") pod "c80a8609-29af-4833-856c-ee4094abcc0c" (UID: "c80a8609-29af-4833-856c-ee4094abcc0c"). InnerVolumeSpecName "etc-ovs". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.130888 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-var-log" (OuterVolumeSpecName: "var-log") pod "c80a8609-29af-4833-856c-ee4094abcc0c" (UID: "c80a8609-29af-4833-856c-ee4094abcc0c"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.130941 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-var-lib" (OuterVolumeSpecName: "var-lib") pod "c80a8609-29af-4833-856c-ee4094abcc0c" (UID: "c80a8609-29af-4833-856c-ee4094abcc0c"). InnerVolumeSpecName "var-lib". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.130969 4857 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-var-log\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.130980 4857 reconciler_common.go:293] "Volume detached for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-etc-ovs\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.131002 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-var-run" (OuterVolumeSpecName: "var-run") pod "c80a8609-29af-4833-856c-ee4094abcc0c" (UID: "c80a8609-29af-4833-856c-ee4094abcc0c"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.131393 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5b28d1e-e702-4528-9964-72ad176a20b3-cache" (OuterVolumeSpecName: "cache") pod "d5b28d1e-e702-4528-9964-72ad176a20b3" (UID: "d5b28d1e-e702-4528-9964-72ad176a20b3"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.133658 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5b28d1e-e702-4528-9964-72ad176a20b3-lock" (OuterVolumeSpecName: "lock") pod "d5b28d1e-e702-4528-9964-72ad176a20b3" (UID: "d5b28d1e-e702-4528-9964-72ad176a20b3"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.134835 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c80a8609-29af-4833-856c-ee4094abcc0c-scripts" (OuterVolumeSpecName: "scripts") pod "c80a8609-29af-4833-856c-ee4094abcc0c" (UID: "c80a8609-29af-4833-856c-ee4094abcc0c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.136501 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "d5b28d1e-e702-4528-9964-72ad176a20b3" (UID: "d5b28d1e-e702-4528-9964-72ad176a20b3"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.137252 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "swift") pod "d5b28d1e-e702-4528-9964-72ad176a20b3" (UID: "d5b28d1e-e702-4528-9964-72ad176a20b3"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.137308 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-kube-api-access-wn8tz" (OuterVolumeSpecName: "kube-api-access-wn8tz") pod "d5b28d1e-e702-4528-9964-72ad176a20b3" (UID: "d5b28d1e-e702-4528-9964-72ad176a20b3"). InnerVolumeSpecName "kube-api-access-wn8tz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.137343 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c80a8609-29af-4833-856c-ee4094abcc0c-kube-api-access-86j52" (OuterVolumeSpecName: "kube-api-access-86j52") pod "c80a8609-29af-4833-856c-ee4094abcc0c" (UID: "c80a8609-29af-4833-856c-ee4094abcc0c"). InnerVolumeSpecName "kube-api-access-86j52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.233035 4857 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/d5b28d1e-e702-4528-9964-72ad176a20b3-cache\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.233067 4857 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.233076 4857 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.233091 4857 reconciler_common.go:293] "Volume detached for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c80a8609-29af-4833-856c-ee4094abcc0c-var-lib\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.233114 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.233123 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c80a8609-29af-4833-856c-ee4094abcc0c-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.233136 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-86j52\" (UniqueName: \"kubernetes.io/projected/c80a8609-29af-4833-856c-ee4094abcc0c-kube-api-access-86j52\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.233146 4857 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/d5b28d1e-e702-4528-9964-72ad176a20b3-lock\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.233155 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wn8tz\" (UniqueName: \"kubernetes.io/projected/d5b28d1e-e702-4528-9964-72ad176a20b3-kube-api-access-wn8tz\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.250943 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.333678 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.585087 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ws79r" Nov 28 13:44:39 crc kubenswrapper[4857]: 
I1128 13:44:39.585187 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ws79r" Nov 28 13:44:39 crc kubenswrapper[4857]: I1128 13:44:39.672185 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ws79r" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.076419 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"d5b28d1e-e702-4528-9964-72ad176a20b3","Type":"ContainerDied","Data":"11aedcbefb28558d19f31a8c3d03b54f3dd09a623275daa4f4ea2749768593c6"} Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.076535 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.076548 4857 scope.go:117] "RemoveContainer" containerID="f4326c863e90d24d084dbd8c33e41f8c3206bed85eb21a3ea86b5f28906b546e" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.078615 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-ph2cf_c80a8609-29af-4833-856c-ee4094abcc0c/ovs-vswitchd/0.log" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.081171 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-ph2cf" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.081236 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-ph2cf" event={"ID":"c80a8609-29af-4833-856c-ee4094abcc0c","Type":"ContainerDied","Data":"990556a080bb034b9f42137b7735b852486767ddf9a79c21d5d0d82ecc2e190d"} Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.112212 4857 scope.go:117] "RemoveContainer" containerID="a2fc2baa7d0114402b84cd8afc19e3b6af384d6ec48988a881766a0605e4b9fa" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.141221 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-ph2cf"] Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.151824 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ovs-ph2cf"] Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.163864 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.166020 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ws79r" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.169327 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-storage-0"] Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.170438 4857 scope.go:117] "RemoveContainer" containerID="ac8cee2bfbd683e0bf23daa4541d27abead299ecd058365d22a491cf7e370d73" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.214396 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ws79r"] Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.214855 4857 scope.go:117] "RemoveContainer" containerID="7c06071c7ab94c9d389a4416468a8dae45a8d25bf695e4f1589bbb97f67b9ff8" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.240103 4857 scope.go:117] "RemoveContainer" containerID="7b7ec0a8594688d16e29d5dbc41ca846eb39e0c6507989d1e1ae16d15e9b54b4" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.271592 4857 scope.go:117] "RemoveContainer" 
containerID="ca6e4b07ddb5ce36fc245cda1d9e032e507ab8f51871ae61fc6d91bf3d94fbcc" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.288406 4857 scope.go:117] "RemoveContainer" containerID="4d8e6435aaf1596d23240a79db43f33846ba61b7ae4b65eb49e14339421d4856" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.306246 4857 scope.go:117] "RemoveContainer" containerID="ee780d1664f73c1b0efc34f94bcba32ef69c9316883ef0a536cf48cc92544c85" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.318422 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c80a8609-29af-4833-856c-ee4094abcc0c" path="/var/lib/kubelet/pods/c80a8609-29af-4833-856c-ee4094abcc0c/volumes" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.319853 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" path="/var/lib/kubelet/pods/d5b28d1e-e702-4528-9964-72ad176a20b3/volumes" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.328439 4857 scope.go:117] "RemoveContainer" containerID="c3ca6f2469bf537185f1bdcbca3c0daa0bea4b5850c553e3aa9fc5b77b64d67a" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.349085 4857 scope.go:117] "RemoveContainer" containerID="1e96365ccd71754557edeab3d45001b8fe49eb13fc19f14f1ba33c6eb2378fc2" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.367247 4857 scope.go:117] "RemoveContainer" containerID="ce54c7d58ad42d61d735cc7c28384296c4ccdf392def1ceb2994e2fc57811e5e" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.381914 4857 scope.go:117] "RemoveContainer" containerID="cba71b9ab843952cf7d72667e396c7374c1e7e44e8883fc3704df6fae16f5f38" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.406961 4857 scope.go:117] "RemoveContainer" containerID="c31f95cb1b9a065f105c67a07bd5d5b7cf66901a282cda1c3bec560e21d74414" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.435334 4857 scope.go:117] "RemoveContainer" containerID="470dd259d2efea986a51d98e27e81921a7309bef3934a73e5e73feb96d784778" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.452714 4857 scope.go:117] "RemoveContainer" containerID="43f3af2bcb6a92ec4e0c79358397d8a0e3515b9b8ec39a557f85c39ba849f2e2" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.472035 4857 scope.go:117] "RemoveContainer" containerID="8daa77c6a75ac309fb1164e6118c9ceda262a9d1e1c41ac9229dc01914b7ee0a" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.494319 4857 scope.go:117] "RemoveContainer" containerID="7439ba7f48e6dd2d600f02ac55631283120e5765b3a2f10c9cc276ad9889be1c" Nov 28 13:44:40 crc kubenswrapper[4857]: I1128 13:44:40.513269 4857 scope.go:117] "RemoveContainer" containerID="e216cbd60b2d5cadbec0b1b5aaf62d4aa986739fbe4ec14e55bb470d382294b3" Nov 28 13:44:42 crc kubenswrapper[4857]: I1128 13:44:42.104293 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ws79r" podUID="bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9" containerName="registry-server" containerID="cri-o://96cbcf0fddb176c928178baf9f24a83a10e99f950262eb47b1421bf8da25e8bf" gracePeriod=2 Nov 28 13:44:42 crc kubenswrapper[4857]: I1128 13:44:42.507177 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ws79r" Nov 28 13:44:42 crc kubenswrapper[4857]: I1128 13:44:42.688115 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9s5tg\" (UniqueName: \"kubernetes.io/projected/bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9-kube-api-access-9s5tg\") pod \"bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9\" (UID: \"bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9\") " Nov 28 13:44:42 crc kubenswrapper[4857]: I1128 13:44:42.688580 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9-catalog-content\") pod \"bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9\" (UID: \"bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9\") " Nov 28 13:44:42 crc kubenswrapper[4857]: I1128 13:44:42.688608 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9-utilities\") pod \"bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9\" (UID: \"bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9\") " Nov 28 13:44:42 crc kubenswrapper[4857]: I1128 13:44:42.690118 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9-utilities" (OuterVolumeSpecName: "utilities") pod "bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9" (UID: "bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:42 crc kubenswrapper[4857]: I1128 13:44:42.694441 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9-kube-api-access-9s5tg" (OuterVolumeSpecName: "kube-api-access-9s5tg") pod "bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9" (UID: "bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9"). InnerVolumeSpecName "kube-api-access-9s5tg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:42 crc kubenswrapper[4857]: I1128 13:44:42.743281 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9" (UID: "bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:44:42 crc kubenswrapper[4857]: I1128 13:44:42.790598 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9s5tg\" (UniqueName: \"kubernetes.io/projected/bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9-kube-api-access-9s5tg\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:42 crc kubenswrapper[4857]: I1128 13:44:42.790641 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:42 crc kubenswrapper[4857]: I1128 13:44:42.790661 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:43 crc kubenswrapper[4857]: I1128 13:44:43.126730 4857 generic.go:334] "Generic (PLEG): container finished" podID="bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9" containerID="96cbcf0fddb176c928178baf9f24a83a10e99f950262eb47b1421bf8da25e8bf" exitCode=0 Nov 28 13:44:43 crc kubenswrapper[4857]: I1128 13:44:43.126807 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ws79r" event={"ID":"bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9","Type":"ContainerDied","Data":"96cbcf0fddb176c928178baf9f24a83a10e99f950262eb47b1421bf8da25e8bf"} Nov 28 13:44:43 crc kubenswrapper[4857]: I1128 13:44:43.126843 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ws79r" event={"ID":"bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9","Type":"ContainerDied","Data":"d5928abcb16a238cd7a14f9a1a2ff57e074102876231f034473d503918ab23e0"} Nov 28 13:44:43 crc kubenswrapper[4857]: I1128 13:44:43.126875 4857 scope.go:117] "RemoveContainer" containerID="96cbcf0fddb176c928178baf9f24a83a10e99f950262eb47b1421bf8da25e8bf" Nov 28 13:44:43 crc kubenswrapper[4857]: I1128 13:44:43.126943 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ws79r" Nov 28 13:44:43 crc kubenswrapper[4857]: I1128 13:44:43.169027 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ws79r"] Nov 28 13:44:43 crc kubenswrapper[4857]: I1128 13:44:43.172049 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="3a952329-a8d9-432d-ac5b-d88b7e2ede6b" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.176:9292/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 13:44:43 crc kubenswrapper[4857]: I1128 13:44:43.172264 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="3a952329-a8d9-432d-ac5b-d88b7e2ede6b" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.176:9292/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 13:44:43 crc kubenswrapper[4857]: I1128 13:44:43.173488 4857 scope.go:117] "RemoveContainer" containerID="26a175955d7ba4f07400ba9d7cb332ee04810b059ce8e8148aeaaac7fdafb022" Nov 28 13:44:43 crc kubenswrapper[4857]: I1128 13:44:43.175185 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ws79r"] Nov 28 13:44:43 crc kubenswrapper[4857]: I1128 13:44:43.210644 4857 scope.go:117] "RemoveContainer" containerID="4b6ab22beccd7ebed0cb247e67acec38f57a9a5fc21c45a9dbe4f6611c070ce6" Nov 28 13:44:43 crc kubenswrapper[4857]: I1128 13:44:43.235507 4857 scope.go:117] "RemoveContainer" containerID="96cbcf0fddb176c928178baf9f24a83a10e99f950262eb47b1421bf8da25e8bf" Nov 28 13:44:43 crc kubenswrapper[4857]: E1128 13:44:43.236168 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96cbcf0fddb176c928178baf9f24a83a10e99f950262eb47b1421bf8da25e8bf\": container with ID starting with 96cbcf0fddb176c928178baf9f24a83a10e99f950262eb47b1421bf8da25e8bf not found: ID does not exist" containerID="96cbcf0fddb176c928178baf9f24a83a10e99f950262eb47b1421bf8da25e8bf" Nov 28 13:44:43 crc kubenswrapper[4857]: I1128 13:44:43.236207 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96cbcf0fddb176c928178baf9f24a83a10e99f950262eb47b1421bf8da25e8bf"} err="failed to get container status \"96cbcf0fddb176c928178baf9f24a83a10e99f950262eb47b1421bf8da25e8bf\": rpc error: code = NotFound desc = could not find container \"96cbcf0fddb176c928178baf9f24a83a10e99f950262eb47b1421bf8da25e8bf\": container with ID starting with 96cbcf0fddb176c928178baf9f24a83a10e99f950262eb47b1421bf8da25e8bf not found: ID does not exist" Nov 28 13:44:43 crc kubenswrapper[4857]: I1128 13:44:43.236229 4857 scope.go:117] "RemoveContainer" containerID="26a175955d7ba4f07400ba9d7cb332ee04810b059ce8e8148aeaaac7fdafb022" Nov 28 13:44:43 crc kubenswrapper[4857]: E1128 13:44:43.236684 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"26a175955d7ba4f07400ba9d7cb332ee04810b059ce8e8148aeaaac7fdafb022\": container with ID starting with 26a175955d7ba4f07400ba9d7cb332ee04810b059ce8e8148aeaaac7fdafb022 not found: ID does not exist" containerID="26a175955d7ba4f07400ba9d7cb332ee04810b059ce8e8148aeaaac7fdafb022" Nov 28 13:44:43 crc kubenswrapper[4857]: 
I1128 13:44:43.236780 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26a175955d7ba4f07400ba9d7cb332ee04810b059ce8e8148aeaaac7fdafb022"} err="failed to get container status \"26a175955d7ba4f07400ba9d7cb332ee04810b059ce8e8148aeaaac7fdafb022\": rpc error: code = NotFound desc = could not find container \"26a175955d7ba4f07400ba9d7cb332ee04810b059ce8e8148aeaaac7fdafb022\": container with ID starting with 26a175955d7ba4f07400ba9d7cb332ee04810b059ce8e8148aeaaac7fdafb022 not found: ID does not exist" Nov 28 13:44:43 crc kubenswrapper[4857]: I1128 13:44:43.236824 4857 scope.go:117] "RemoveContainer" containerID="4b6ab22beccd7ebed0cb247e67acec38f57a9a5fc21c45a9dbe4f6611c070ce6" Nov 28 13:44:43 crc kubenswrapper[4857]: E1128 13:44:43.237482 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b6ab22beccd7ebed0cb247e67acec38f57a9a5fc21c45a9dbe4f6611c070ce6\": container with ID starting with 4b6ab22beccd7ebed0cb247e67acec38f57a9a5fc21c45a9dbe4f6611c070ce6 not found: ID does not exist" containerID="4b6ab22beccd7ebed0cb247e67acec38f57a9a5fc21c45a9dbe4f6611c070ce6" Nov 28 13:44:43 crc kubenswrapper[4857]: I1128 13:44:43.237690 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b6ab22beccd7ebed0cb247e67acec38f57a9a5fc21c45a9dbe4f6611c070ce6"} err="failed to get container status \"4b6ab22beccd7ebed0cb247e67acec38f57a9a5fc21c45a9dbe4f6611c070ce6\": rpc error: code = NotFound desc = could not find container \"4b6ab22beccd7ebed0cb247e67acec38f57a9a5fc21c45a9dbe4f6611c070ce6\": container with ID starting with 4b6ab22beccd7ebed0cb247e67acec38f57a9a5fc21c45a9dbe4f6611c070ce6 not found: ID does not exist" Nov 28 13:44:43 crc kubenswrapper[4857]: I1128 13:44:43.796455 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/placement-749fd8cf96-rbd6r" podUID="946c0669-4c99-46b7-a9ff-437042383642" containerName="placement-log" probeResult="failure" output="Get \"https://10.217.0.149:8778/\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 13:44:43 crc kubenswrapper[4857]: I1128 13:44:43.796524 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/placement-749fd8cf96-rbd6r" podUID="946c0669-4c99-46b7-a9ff-437042383642" containerName="placement-api" probeResult="failure" output="Get \"https://10.217.0.149:8778/\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 13:44:44 crc kubenswrapper[4857]: I1128 13:44:44.322708 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9" path="/var/lib/kubelet/pods/bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9/volumes" Nov 28 13:44:45 crc kubenswrapper[4857]: E1128 13:44:45.027408 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:45 crc kubenswrapper[4857]: E1128 13:44:45.027986 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6f75b361-6a38-42a4-971c-1b3a68a3f10f-operator-scripts podName:6f75b361-6a38-42a4-971c-1b3a68a3f10f nodeName:}" failed. No retries permitted until 2025-11-28 13:45:17.027947317 +0000 UTC m=+1609.055322524 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/6f75b361-6a38-42a4-971c-1b3a68a3f10f-operator-scripts") pod "novaapi7cc9-account-delete-qjqg5" (UID: "6f75b361-6a38-42a4-971c-1b3a68a3f10f") : configmap "openstack-scripts" not found Nov 28 13:44:45 crc kubenswrapper[4857]: E1128 13:44:45.027461 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:45 crc kubenswrapper[4857]: E1128 13:44:45.028108 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ad7bc32b-e1f1-4ce5-a094-56f37d676131-operator-scripts podName:ad7bc32b-e1f1-4ce5-a094-56f37d676131 nodeName:}" failed. No retries permitted until 2025-11-28 13:45:17.028078691 +0000 UTC m=+1609.055453928 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/ad7bc32b-e1f1-4ce5-a094-56f37d676131-operator-scripts") pod "novacell032fc-account-delete-xk7xx" (UID: "ad7bc32b-e1f1-4ce5-a094-56f37d676131") : configmap "openstack-scripts" not found Nov 28 13:44:45 crc kubenswrapper[4857]: E1128 13:44:45.129212 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:45 crc kubenswrapper[4857]: E1128 13:44:45.129299 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d8c0e041-9c74-4a06-a966-833e919e745a-operator-scripts podName:d8c0e041-9c74-4a06-a966-833e919e745a nodeName:}" failed. No retries permitted until 2025-11-28 13:45:17.129275008 +0000 UTC m=+1609.156650185 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/d8c0e041-9c74-4a06-a966-833e919e745a-operator-scripts") pod "barbican7dd8-account-delete-jg2j5" (UID: "d8c0e041-9c74-4a06-a966-833e919e745a") : configmap "openstack-scripts" not found Nov 28 13:44:45 crc kubenswrapper[4857]: E1128 13:44:45.129350 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:44:45 crc kubenswrapper[4857]: E1128 13:44:45.129419 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-operator-scripts podName:24a3dca4-a3d0-479d-9be8-fb8c16f97a77 nodeName:}" failed. No retries permitted until 2025-11-28 13:45:17.129398801 +0000 UTC m=+1609.156774008 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-operator-scripts") pod "cinderd8b3-account-delete-lxwj8" (UID: "24a3dca4-a3d0-479d-9be8-fb8c16f97a77") : configmap "openstack-scripts" not found Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.163643 4857 generic.go:334] "Generic (PLEG): container finished" podID="6f75b361-6a38-42a4-971c-1b3a68a3f10f" containerID="e0661472bb397b0a3a1dd55baa5c4817a98ad0975c18b4cb900b5650b0bc0b7a" exitCode=137 Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.163743 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi7cc9-account-delete-qjqg5" event={"ID":"6f75b361-6a38-42a4-971c-1b3a68a3f10f","Type":"ContainerDied","Data":"e0661472bb397b0a3a1dd55baa5c4817a98ad0975c18b4cb900b5650b0bc0b7a"} Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.166792 4857 generic.go:334] "Generic (PLEG): container finished" podID="d8c0e041-9c74-4a06-a966-833e919e745a" containerID="7b2a4e3bea8fb0cf276592ab346a1a0e736d0c8fe0be99aa787de15196b3f05e" exitCode=137 Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.166851 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican7dd8-account-delete-jg2j5" event={"ID":"d8c0e041-9c74-4a06-a966-833e919e745a","Type":"ContainerDied","Data":"7b2a4e3bea8fb0cf276592ab346a1a0e736d0c8fe0be99aa787de15196b3f05e"} Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.169431 4857 generic.go:334] "Generic (PLEG): container finished" podID="24a3dca4-a3d0-479d-9be8-fb8c16f97a77" containerID="c87cac856e484a65204ac4a22fcde410a6698e64a13f6990ed0561ebfe6b4815" exitCode=137 Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.169502 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinderd8b3-account-delete-lxwj8" event={"ID":"24a3dca4-a3d0-479d-9be8-fb8c16f97a77","Type":"ContainerDied","Data":"c87cac856e484a65204ac4a22fcde410a6698e64a13f6990ed0561ebfe6b4815"} Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.170926 4857 generic.go:334] "Generic (PLEG): container finished" podID="ad7bc32b-e1f1-4ce5-a094-56f37d676131" containerID="176a8c3598639b71724934fa1590ebaca77aa44b9b8de6fbcb3127e2a34f1547" exitCode=137 Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.170956 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell032fc-account-delete-xk7xx" event={"ID":"ad7bc32b-e1f1-4ce5-a094-56f37d676131","Type":"ContainerDied","Data":"176a8c3598639b71724934fa1590ebaca77aa44b9b8de6fbcb3127e2a34f1547"} Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.592612 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinderd8b3-account-delete-lxwj8" Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.678978 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell032fc-account-delete-xk7xx" Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.690555 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican7dd8-account-delete-jg2j5" Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.698729 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/novaapi7cc9-account-delete-qjqg5" Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.752428 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-operator-scripts\") pod \"24a3dca4-a3d0-479d-9be8-fb8c16f97a77\" (UID: \"24a3dca4-a3d0-479d-9be8-fb8c16f97a77\") " Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.752500 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9k2h8\" (UniqueName: \"kubernetes.io/projected/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-kube-api-access-9k2h8\") pod \"24a3dca4-a3d0-479d-9be8-fb8c16f97a77\" (UID: \"24a3dca4-a3d0-479d-9be8-fb8c16f97a77\") " Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.752819 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6qfwm\" (UniqueName: \"kubernetes.io/projected/6f75b361-6a38-42a4-971c-1b3a68a3f10f-kube-api-access-6qfwm\") pod \"6f75b361-6a38-42a4-971c-1b3a68a3f10f\" (UID: \"6f75b361-6a38-42a4-971c-1b3a68a3f10f\") " Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.752905 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ad7bc32b-e1f1-4ce5-a094-56f37d676131-operator-scripts\") pod \"ad7bc32b-e1f1-4ce5-a094-56f37d676131\" (UID: \"ad7bc32b-e1f1-4ce5-a094-56f37d676131\") " Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.752943 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lmb7f\" (UniqueName: \"kubernetes.io/projected/ad7bc32b-e1f1-4ce5-a094-56f37d676131-kube-api-access-lmb7f\") pod \"ad7bc32b-e1f1-4ce5-a094-56f37d676131\" (UID: \"ad7bc32b-e1f1-4ce5-a094-56f37d676131\") " Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.752969 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f75b361-6a38-42a4-971c-1b3a68a3f10f-operator-scripts\") pod \"6f75b361-6a38-42a4-971c-1b3a68a3f10f\" (UID: \"6f75b361-6a38-42a4-971c-1b3a68a3f10f\") " Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.753006 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8c0e041-9c74-4a06-a966-833e919e745a-operator-scripts\") pod \"d8c0e041-9c74-4a06-a966-833e919e745a\" (UID: \"d8c0e041-9c74-4a06-a966-833e919e745a\") " Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.753032 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6dkr6\" (UniqueName: \"kubernetes.io/projected/d8c0e041-9c74-4a06-a966-833e919e745a-kube-api-access-6dkr6\") pod \"d8c0e041-9c74-4a06-a966-833e919e745a\" (UID: \"d8c0e041-9c74-4a06-a966-833e919e745a\") " Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.753113 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "24a3dca4-a3d0-479d-9be8-fb8c16f97a77" (UID: "24a3dca4-a3d0-479d-9be8-fb8c16f97a77"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.753419 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.753601 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8c0e041-9c74-4a06-a966-833e919e745a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d8c0e041-9c74-4a06-a966-833e919e745a" (UID: "d8c0e041-9c74-4a06-a966-833e919e745a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.753594 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad7bc32b-e1f1-4ce5-a094-56f37d676131-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ad7bc32b-e1f1-4ce5-a094-56f37d676131" (UID: "ad7bc32b-e1f1-4ce5-a094-56f37d676131"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.753952 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f75b361-6a38-42a4-971c-1b3a68a3f10f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6f75b361-6a38-42a4-971c-1b3a68a3f10f" (UID: "6f75b361-6a38-42a4-971c-1b3a68a3f10f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.757881 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad7bc32b-e1f1-4ce5-a094-56f37d676131-kube-api-access-lmb7f" (OuterVolumeSpecName: "kube-api-access-lmb7f") pod "ad7bc32b-e1f1-4ce5-a094-56f37d676131" (UID: "ad7bc32b-e1f1-4ce5-a094-56f37d676131"). InnerVolumeSpecName "kube-api-access-lmb7f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.758036 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-kube-api-access-9k2h8" (OuterVolumeSpecName: "kube-api-access-9k2h8") pod "24a3dca4-a3d0-479d-9be8-fb8c16f97a77" (UID: "24a3dca4-a3d0-479d-9be8-fb8c16f97a77"). InnerVolumeSpecName "kube-api-access-9k2h8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.759270 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f75b361-6a38-42a4-971c-1b3a68a3f10f-kube-api-access-6qfwm" (OuterVolumeSpecName: "kube-api-access-6qfwm") pod "6f75b361-6a38-42a4-971c-1b3a68a3f10f" (UID: "6f75b361-6a38-42a4-971c-1b3a68a3f10f"). InnerVolumeSpecName "kube-api-access-6qfwm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.759876 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8c0e041-9c74-4a06-a966-833e919e745a-kube-api-access-6dkr6" (OuterVolumeSpecName: "kube-api-access-6dkr6") pod "d8c0e041-9c74-4a06-a966-833e919e745a" (UID: "d8c0e041-9c74-4a06-a966-833e919e745a"). InnerVolumeSpecName "kube-api-access-6dkr6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.854436 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9k2h8\" (UniqueName: \"kubernetes.io/projected/24a3dca4-a3d0-479d-9be8-fb8c16f97a77-kube-api-access-9k2h8\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.854481 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6qfwm\" (UniqueName: \"kubernetes.io/projected/6f75b361-6a38-42a4-971c-1b3a68a3f10f-kube-api-access-6qfwm\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.854496 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ad7bc32b-e1f1-4ce5-a094-56f37d676131-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.854508 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lmb7f\" (UniqueName: \"kubernetes.io/projected/ad7bc32b-e1f1-4ce5-a094-56f37d676131-kube-api-access-lmb7f\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.854520 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f75b361-6a38-42a4-971c-1b3a68a3f10f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.854532 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8c0e041-9c74-4a06-a966-833e919e745a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:46 crc kubenswrapper[4857]: I1128 13:44:46.854544 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6dkr6\" (UniqueName: \"kubernetes.io/projected/d8c0e041-9c74-4a06-a966-833e919e745a-kube-api-access-6dkr6\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:47 crc kubenswrapper[4857]: I1128 13:44:47.195405 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell032fc-account-delete-xk7xx" event={"ID":"ad7bc32b-e1f1-4ce5-a094-56f37d676131","Type":"ContainerDied","Data":"bbafd5b524434b94d1c4d5221aacb50eb0ef8348685cfe759c6ef6713ce991f4"} Nov 28 13:44:47 crc kubenswrapper[4857]: I1128 13:44:47.196373 4857 scope.go:117] "RemoveContainer" containerID="176a8c3598639b71724934fa1590ebaca77aa44b9b8de6fbcb3127e2a34f1547" Nov 28 13:44:47 crc kubenswrapper[4857]: I1128 13:44:47.195451 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell032fc-account-delete-xk7xx" Nov 28 13:44:47 crc kubenswrapper[4857]: I1128 13:44:47.202785 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/novaapi7cc9-account-delete-qjqg5" Nov 28 13:44:47 crc kubenswrapper[4857]: I1128 13:44:47.204127 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi7cc9-account-delete-qjqg5" event={"ID":"6f75b361-6a38-42a4-971c-1b3a68a3f10f","Type":"ContainerDied","Data":"6490c90ff4345c57ffecc838b7daecc71bba27fe8678efb3148de71bd883910c"} Nov 28 13:44:47 crc kubenswrapper[4857]: I1128 13:44:47.207368 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican7dd8-account-delete-jg2j5" event={"ID":"d8c0e041-9c74-4a06-a966-833e919e745a","Type":"ContainerDied","Data":"85cc8f8947446485c19e1a73e1557ca7c966bab25b33207b2360b87918d2a63c"} Nov 28 13:44:47 crc kubenswrapper[4857]: I1128 13:44:47.207547 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican7dd8-account-delete-jg2j5" Nov 28 13:44:47 crc kubenswrapper[4857]: I1128 13:44:47.213191 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinderd8b3-account-delete-lxwj8" event={"ID":"24a3dca4-a3d0-479d-9be8-fb8c16f97a77","Type":"ContainerDied","Data":"017d43f882bf4a5325fc042ae896444e41b03857edf394e39ab7e177cc19b3bf"} Nov 28 13:44:47 crc kubenswrapper[4857]: I1128 13:44:47.213714 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinderd8b3-account-delete-lxwj8" Nov 28 13:44:47 crc kubenswrapper[4857]: I1128 13:44:47.250638 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novaapi7cc9-account-delete-qjqg5"] Nov 28 13:44:47 crc kubenswrapper[4857]: I1128 13:44:47.263907 4857 scope.go:117] "RemoveContainer" containerID="e0661472bb397b0a3a1dd55baa5c4817a98ad0975c18b4cb900b5650b0bc0b7a" Nov 28 13:44:47 crc kubenswrapper[4857]: I1128 13:44:47.272651 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novaapi7cc9-account-delete-qjqg5"] Nov 28 13:44:47 crc kubenswrapper[4857]: I1128 13:44:47.288343 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican7dd8-account-delete-jg2j5"] Nov 28 13:44:47 crc kubenswrapper[4857]: I1128 13:44:47.295985 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican7dd8-account-delete-jg2j5"] Nov 28 13:44:47 crc kubenswrapper[4857]: I1128 13:44:47.297366 4857 scope.go:117] "RemoveContainer" containerID="7b2a4e3bea8fb0cf276592ab346a1a0e736d0c8fe0be99aa787de15196b3f05e" Nov 28 13:44:47 crc kubenswrapper[4857]: I1128 13:44:47.304390 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinderd8b3-account-delete-lxwj8"] Nov 28 13:44:47 crc kubenswrapper[4857]: I1128 13:44:47.310588 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinderd8b3-account-delete-lxwj8"] Nov 28 13:44:47 crc kubenswrapper[4857]: I1128 13:44:47.316110 4857 scope.go:117] "RemoveContainer" containerID="c87cac856e484a65204ac4a22fcde410a6698e64a13f6990ed0561ebfe6b4815" Nov 28 13:44:47 crc kubenswrapper[4857]: I1128 13:44:47.316629 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell032fc-account-delete-xk7xx"] Nov 28 13:44:47 crc kubenswrapper[4857]: I1128 13:44:47.321887 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novacell032fc-account-delete-xk7xx"] Nov 28 13:44:48 crc kubenswrapper[4857]: I1128 13:44:48.314946 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7" Nov 28 13:44:48 crc kubenswrapper[4857]: E1128 13:44:48.316160 4857 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" Nov 28 13:44:48 crc kubenswrapper[4857]: I1128 13:44:48.324693 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24a3dca4-a3d0-479d-9be8-fb8c16f97a77" path="/var/lib/kubelet/pods/24a3dca4-a3d0-479d-9be8-fb8c16f97a77/volumes" Nov 28 13:44:48 crc kubenswrapper[4857]: I1128 13:44:48.325857 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f75b361-6a38-42a4-971c-1b3a68a3f10f" path="/var/lib/kubelet/pods/6f75b361-6a38-42a4-971c-1b3a68a3f10f/volumes" Nov 28 13:44:48 crc kubenswrapper[4857]: I1128 13:44:48.326908 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad7bc32b-e1f1-4ce5-a094-56f37d676131" path="/var/lib/kubelet/pods/ad7bc32b-e1f1-4ce5-a094-56f37d676131/volumes" Nov 28 13:44:48 crc kubenswrapper[4857]: I1128 13:44:48.328113 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8c0e041-9c74-4a06-a966-833e919e745a" path="/var/lib/kubelet/pods/d8c0e041-9c74-4a06-a966-833e919e745a/volumes" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.147171 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405625-ml2vk"] Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148110 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="account-server" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148131 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="account-server" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148150 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="object-expirer" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148158 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="object-expirer" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148171 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c80a8609-29af-4833-856c-ee4094abcc0c" containerName="ovsdb-server" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148176 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c80a8609-29af-4833-856c-ee4094abcc0c" containerName="ovsdb-server" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148184 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9" containerName="registry-server" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148191 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9" containerName="registry-server" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148207 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9" containerName="extract-content" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148214 4857 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9" containerName="extract-content" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148223 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f75b361-6a38-42a4-971c-1b3a68a3f10f" containerName="mariadb-account-delete" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148230 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f75b361-6a38-42a4-971c-1b3a68a3f10f" containerName="mariadb-account-delete" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148240 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="object-auditor" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148247 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="object-auditor" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148260 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8c0e041-9c74-4a06-a966-833e919e745a" containerName="mariadb-account-delete" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148266 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8c0e041-9c74-4a06-a966-833e919e745a" containerName="mariadb-account-delete" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148277 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="rsync" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148284 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="rsync" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148297 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="account-reaper" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148304 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="account-reaper" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148317 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="account-replicator" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148325 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="account-replicator" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148337 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="object-server" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148344 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="object-server" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148357 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="object-updater" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148364 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="object-updater" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148373 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="container-auditor" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148380 4857 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="container-auditor" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148392 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c80a8609-29af-4833-856c-ee4094abcc0c" containerName="ovs-vswitchd" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148399 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c80a8609-29af-4833-856c-ee4094abcc0c" containerName="ovs-vswitchd" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148414 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="container-replicator" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148421 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="container-replicator" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148435 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9" containerName="extract-utilities" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148442 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9" containerName="extract-utilities" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148454 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad7bc32b-e1f1-4ce5-a094-56f37d676131" containerName="mariadb-account-delete" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148462 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad7bc32b-e1f1-4ce5-a094-56f37d676131" containerName="mariadb-account-delete" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148474 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="object-replicator" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148481 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="object-replicator" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148491 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="swift-recon-cron" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148497 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="swift-recon-cron" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148507 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c80a8609-29af-4833-856c-ee4094abcc0c" containerName="ovsdb-server-init" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148515 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c80a8609-29af-4833-856c-ee4094abcc0c" containerName="ovsdb-server-init" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148525 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="account-auditor" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148531 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="account-auditor" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148542 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24a3dca4-a3d0-479d-9be8-fb8c16f97a77" containerName="mariadb-account-delete" Nov 28 13:45:00 crc 
kubenswrapper[4857]: I1128 13:45:00.148548 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="24a3dca4-a3d0-479d-9be8-fb8c16f97a77" containerName="mariadb-account-delete" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148559 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="container-updater" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148567 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="container-updater" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.148574 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="container-server" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148580 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="container-server" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148731 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="account-server" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148743 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="object-server" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148773 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="container-updater" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148782 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f75b361-6a38-42a4-971c-1b3a68a3f10f" containerName="mariadb-account-delete" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148797 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="object-updater" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148806 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="account-replicator" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148817 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="object-expirer" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148827 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="c80a8609-29af-4833-856c-ee4094abcc0c" containerName="ovs-vswitchd" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148838 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="account-reaper" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148846 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="object-replicator" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148857 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="container-auditor" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148867 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb2d83f9-4fae-4cfc-9b9d-4a2ad890ccd9" containerName="registry-server" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148877 4857 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="c80a8609-29af-4833-856c-ee4094abcc0c" containerName="ovsdb-server" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148889 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="container-server" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148899 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="container-replicator" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148906 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad7bc32b-e1f1-4ce5-a094-56f37d676131" containerName="mariadb-account-delete" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148917 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="24a3dca4-a3d0-479d-9be8-fb8c16f97a77" containerName="mariadb-account-delete" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148929 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8c0e041-9c74-4a06-a966-833e919e745a" containerName="mariadb-account-delete" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148939 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="swift-recon-cron" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148948 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="account-auditor" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148960 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="object-auditor" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.148972 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5b28d1e-e702-4528-9964-72ad176a20b3" containerName="rsync" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.149449 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-ml2vk" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.152280 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.152609 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.161664 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405625-ml2vk"] Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.261621 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d8fca0a0-cd3b-4be8-91c4-70e172de159c-config-volume\") pod \"collect-profiles-29405625-ml2vk\" (UID: \"d8fca0a0-cd3b-4be8-91c4-70e172de159c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-ml2vk" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.261673 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgp96\" (UniqueName: \"kubernetes.io/projected/d8fca0a0-cd3b-4be8-91c4-70e172de159c-kube-api-access-kgp96\") pod \"collect-profiles-29405625-ml2vk\" (UID: \"d8fca0a0-cd3b-4be8-91c4-70e172de159c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-ml2vk" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.261851 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d8fca0a0-cd3b-4be8-91c4-70e172de159c-secret-volume\") pod \"collect-profiles-29405625-ml2vk\" (UID: \"d8fca0a0-cd3b-4be8-91c4-70e172de159c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-ml2vk" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.309553 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7" Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.309788 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.363689 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d8fca0a0-cd3b-4be8-91c4-70e172de159c-config-volume\") pod \"collect-profiles-29405625-ml2vk\" (UID: \"d8fca0a0-cd3b-4be8-91c4-70e172de159c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-ml2vk" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.363765 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgp96\" (UniqueName: \"kubernetes.io/projected/d8fca0a0-cd3b-4be8-91c4-70e172de159c-kube-api-access-kgp96\") pod \"collect-profiles-29405625-ml2vk\" (UID: \"d8fca0a0-cd3b-4be8-91c4-70e172de159c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-ml2vk" 
Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.363926 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d8fca0a0-cd3b-4be8-91c4-70e172de159c-secret-volume\") pod \"collect-profiles-29405625-ml2vk\" (UID: \"d8fca0a0-cd3b-4be8-91c4-70e172de159c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-ml2vk" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.365443 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d8fca0a0-cd3b-4be8-91c4-70e172de159c-config-volume\") pod \"collect-profiles-29405625-ml2vk\" (UID: \"d8fca0a0-cd3b-4be8-91c4-70e172de159c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-ml2vk" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.377384 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d8fca0a0-cd3b-4be8-91c4-70e172de159c-secret-volume\") pod \"collect-profiles-29405625-ml2vk\" (UID: \"d8fca0a0-cd3b-4be8-91c4-70e172de159c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-ml2vk" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.381733 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgp96\" (UniqueName: \"kubernetes.io/projected/d8fca0a0-cd3b-4be8-91c4-70e172de159c-kube-api-access-kgp96\") pod \"collect-profiles-29405625-ml2vk\" (UID: \"d8fca0a0-cd3b-4be8-91c4-70e172de159c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-ml2vk" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.479190 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-ml2vk" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.921177 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405625-ml2vk"] Nov 28 13:45:01 crc kubenswrapper[4857]: I1128 13:45:01.353953 4857 generic.go:334] "Generic (PLEG): container finished" podID="d8fca0a0-cd3b-4be8-91c4-70e172de159c" containerID="a139ecaa5875565270942dc63d797c1501227a690579fa3a194860c1152e4c9c" exitCode=0 Nov 28 13:45:01 crc kubenswrapper[4857]: I1128 13:45:01.354034 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-ml2vk" event={"ID":"d8fca0a0-cd3b-4be8-91c4-70e172de159c","Type":"ContainerDied","Data":"a139ecaa5875565270942dc63d797c1501227a690579fa3a194860c1152e4c9c"} Nov 28 13:45:01 crc kubenswrapper[4857]: I1128 13:45:01.354263 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-ml2vk" event={"ID":"d8fca0a0-cd3b-4be8-91c4-70e172de159c","Type":"ContainerStarted","Data":"8314c70c82b4240ece6a88cfd420a009c6a4eaaa1a55a87f185dc55485e515ea"} Nov 28 13:45:02 crc kubenswrapper[4857]: I1128 13:45:02.812451 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-ml2vk" Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.006657 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kgp96\" (UniqueName: \"kubernetes.io/projected/d8fca0a0-cd3b-4be8-91c4-70e172de159c-kube-api-access-kgp96\") pod \"d8fca0a0-cd3b-4be8-91c4-70e172de159c\" (UID: \"d8fca0a0-cd3b-4be8-91c4-70e172de159c\") " Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.006739 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d8fca0a0-cd3b-4be8-91c4-70e172de159c-secret-volume\") pod \"d8fca0a0-cd3b-4be8-91c4-70e172de159c\" (UID: \"d8fca0a0-cd3b-4be8-91c4-70e172de159c\") " Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.006853 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d8fca0a0-cd3b-4be8-91c4-70e172de159c-config-volume\") pod \"d8fca0a0-cd3b-4be8-91c4-70e172de159c\" (UID: \"d8fca0a0-cd3b-4be8-91c4-70e172de159c\") " Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.008101 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8fca0a0-cd3b-4be8-91c4-70e172de159c-config-volume" (OuterVolumeSpecName: "config-volume") pod "d8fca0a0-cd3b-4be8-91c4-70e172de159c" (UID: "d8fca0a0-cd3b-4be8-91c4-70e172de159c"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.014035 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8fca0a0-cd3b-4be8-91c4-70e172de159c-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d8fca0a0-cd3b-4be8-91c4-70e172de159c" (UID: "d8fca0a0-cd3b-4be8-91c4-70e172de159c"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.014027 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8fca0a0-cd3b-4be8-91c4-70e172de159c-kube-api-access-kgp96" (OuterVolumeSpecName: "kube-api-access-kgp96") pod "d8fca0a0-cd3b-4be8-91c4-70e172de159c" (UID: "d8fca0a0-cd3b-4be8-91c4-70e172de159c"). InnerVolumeSpecName "kube-api-access-kgp96". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.109026 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kgp96\" (UniqueName: \"kubernetes.io/projected/d8fca0a0-cd3b-4be8-91c4-70e172de159c-kube-api-access-kgp96\") on node \"crc\" DevicePath \"\"" Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.109079 4857 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d8fca0a0-cd3b-4be8-91c4-70e172de159c-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.109102 4857 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d8fca0a0-cd3b-4be8-91c4-70e172de159c-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.381563 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-ml2vk" event={"ID":"d8fca0a0-cd3b-4be8-91c4-70e172de159c","Type":"ContainerDied","Data":"8314c70c82b4240ece6a88cfd420a009c6a4eaaa1a55a87f185dc55485e515ea"} Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.381629 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8314c70c82b4240ece6a88cfd420a009c6a4eaaa1a55a87f185dc55485e515ea" Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.381722 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-ml2vk" Nov 28 13:45:14 crc kubenswrapper[4857]: I1128 13:45:14.310222 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7" Nov 28 13:45:14 crc kubenswrapper[4857]: E1128 13:45:14.311178 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" Nov 28 13:45:27 crc kubenswrapper[4857]: I1128 13:45:27.310470 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7" Nov 28 13:45:27 crc kubenswrapper[4857]: E1128 13:45:27.311507 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" Nov 28 13:45:39 crc kubenswrapper[4857]: I1128 13:45:39.015130 4857 scope.go:117] "RemoveContainer" containerID="9571dde5e18b53bcb326ad442ce10db79198107d0140d43bcf9dc5435a836278" Nov 28 13:45:39 crc kubenswrapper[4857]: I1128 13:45:39.055935 4857 scope.go:117] "RemoveContainer" containerID="c3b960d83097e2d52058b222703bf37d4168bd557a57337292c4ea2a1d269c32" Nov 28 13:45:39 crc kubenswrapper[4857]: I1128 13:45:39.102841 4857 scope.go:117] "RemoveContainer" containerID="3284e67be7e4ba9585a3ba4373dbece92afd37195784b345ddad64c4d295b925" Nov 28 13:45:39 crc 
Nov 28 13:45:39 crc kubenswrapper[4857]: I1128 13:45:39.131801 4857 scope.go:117] "RemoveContainer" containerID="c9fbfe8853f76db26be85fbc07e14c1743b9d9e65d61b36a44483686c3031648"
Nov 28 13:45:39 crc kubenswrapper[4857]: I1128 13:45:39.161567 4857 scope.go:117] "RemoveContainer" containerID="5351abf72165635270eecdbf0fccb9f428f502d3b3b5b6535e65aa42cf4be817"
Nov 28 13:45:39 crc kubenswrapper[4857]: I1128 13:45:39.187692 4857 scope.go:117] "RemoveContainer" containerID="6003a7f8ab27a4820565904d5d128a843aa2e8125b96ec24769c383a1f6201e3"
Nov 28 13:45:39 crc kubenswrapper[4857]: I1128 13:45:39.214433 4857 scope.go:117] "RemoveContainer" containerID="374e5e270733150ea249d79e7fb14a49da907af6ce513d9434f187161e66df2b"
Nov 28 13:45:39 crc kubenswrapper[4857]: I1128 13:45:39.235955 4857 scope.go:117] "RemoveContainer" containerID="015bbd0a1a1e9fb405214fe7a35a6c512629833b2d306bd11d97dfd7b5021dee"
Nov 28 13:45:40 crc kubenswrapper[4857]: I1128 13:45:40.310553 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7"
Nov 28 13:45:40 crc kubenswrapper[4857]: E1128 13:45:40.311292 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68"
Nov 28 13:45:51 crc kubenswrapper[4857]: I1128 13:45:51.309976 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7"
Nov 28 13:45:51 crc kubenswrapper[4857]: E1128 13:45:51.310602 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68"
Nov 28 13:46:02 crc kubenswrapper[4857]: I1128 13:46:02.310365 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7"
Nov 28 13:46:02 crc kubenswrapper[4857]: E1128 13:46:02.311187 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68"
Nov 28 13:46:15 crc kubenswrapper[4857]: I1128 13:46:15.309983 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7"
Nov 28 13:46:15 crc kubenswrapper[4857]: E1128 13:46:15.310884 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68"
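The repeating RemoveContainer / "CrashLoopBackOff: back-off 5m0s" pair for machine-config-daemon shows a saturated restart back-off: every sync attempt inside the open back-off window is refused and logged as "Error syncing pod, skipping". A rough model of capped doubling back-off; the 5m cap is what this log reports, while the 10s base is an assumed default, not taken from the log:

```go
package backoff

import "time"

const (
	baseDelay = 10 * time.Second // assumed default, not from the log
	maxDelay  = 5 * time.Minute  // the "back-off 5m0s" cap seen above
)

// Delay returns the wait before restart attempt n (n = 0 for the first
// retry). Each consecutive crash doubles the delay until it saturates,
// after which every retry reports the same 5m0s window.
func Delay(n int) time.Duration {
	d := baseDelay
	for i := 0; i < n; i++ {
		d *= 2
		if d >= maxDelay {
			return maxDelay
		}
	}
	return d
}
```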
podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" Nov 28 13:46:29 crc kubenswrapper[4857]: I1128 13:46:29.310206 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7" Nov 28 13:46:29 crc kubenswrapper[4857]: E1128 13:46:29.311658 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" Nov 28 13:46:39 crc kubenswrapper[4857]: I1128 13:46:39.515939 4857 scope.go:117] "RemoveContainer" containerID="9bebe354f4e687668c7b355847459f71870ebcf2d1bbe3d8bce669ca51585b5b" Nov 28 13:46:39 crc kubenswrapper[4857]: I1128 13:46:39.552844 4857 scope.go:117] "RemoveContainer" containerID="91dcce04820a9b0657a39d68a3db2fea0d2ae4c92ed1c937c1ef2c2fce48486a" Nov 28 13:46:39 crc kubenswrapper[4857]: I1128 13:46:39.594963 4857 scope.go:117] "RemoveContainer" containerID="e46dd6d35ad547e09a4579d4f034ecbb1cec046bf81e0cef193422a55414ebac" Nov 28 13:46:39 crc kubenswrapper[4857]: I1128 13:46:39.644083 4857 scope.go:117] "RemoveContainer" containerID="88d66ca4343559d7d4481c694db17d2be3dfdb37e9c02ffc49a3d608062a5a93" Nov 28 13:46:39 crc kubenswrapper[4857]: I1128 13:46:39.666679 4857 scope.go:117] "RemoveContainer" containerID="c5fa9e8bb8c361f1c6685760ea46af512b07d5764950a9492028f7c42b4af089" Nov 28 13:46:39 crc kubenswrapper[4857]: I1128 13:46:39.695305 4857 scope.go:117] "RemoveContainer" containerID="2676ac5d14e3c0810d8b7bcc46f55ae7cc4e6f8b9048e707dd961359a7bd5c41" Nov 28 13:46:39 crc kubenswrapper[4857]: I1128 13:46:39.728884 4857 scope.go:117] "RemoveContainer" containerID="4b0c9ed1f44474664d82aa8cab066df058686abc50bf319b8d3ab71fdc39a7d8" Nov 28 13:46:39 crc kubenswrapper[4857]: I1128 13:46:39.759277 4857 scope.go:117] "RemoveContainer" containerID="2d6d6c3dd1604b94e9e508e69b19b0de5d2d7a133234920d03dc0f4547ebbfba" Nov 28 13:46:39 crc kubenswrapper[4857]: I1128 13:46:39.785018 4857 scope.go:117] "RemoveContainer" containerID="cf1e9309e85c6ed36109909dfa1bceafc508d39182fe6f235d9c9e9aae2f7c61" Nov 28 13:46:39 crc kubenswrapper[4857]: I1128 13:46:39.815984 4857 scope.go:117] "RemoveContainer" containerID="0878bd25d4cba44dddd8101e2ea744174f24a930321fa1c902f705f1860a22f1" Nov 28 13:46:39 crc kubenswrapper[4857]: I1128 13:46:39.855316 4857 scope.go:117] "RemoveContainer" containerID="a1a46ab9fdee200682dc389948aee53258b76c2adf51df8c6a05643fd6fc557e" Nov 28 13:46:39 crc kubenswrapper[4857]: I1128 13:46:39.886883 4857 scope.go:117] "RemoveContainer" containerID="647db67975f4acdafaff02496b5e1bc40cc59e718b88965d4617b2338c512f5f" Nov 28 13:46:39 crc kubenswrapper[4857]: I1128 13:46:39.903545 4857 scope.go:117] "RemoveContainer" containerID="a9f3b9fd804e2424e73c9dfbddaef4f71d9e87da6184a7e141669067fc738bc2" Nov 28 13:46:39 crc kubenswrapper[4857]: I1128 13:46:39.942402 4857 scope.go:117] "RemoveContainer" containerID="08bd3edd8a1d8b0073c2499c1dbe3a904476b92917461d12cbf12f279796f8e7" Nov 28 13:46:40 crc kubenswrapper[4857]: I1128 13:46:40.310104 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7" Nov 28 13:46:40 crc kubenswrapper[4857]: E1128 13:46:40.310620 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" Nov 28 13:46:44 crc kubenswrapper[4857]: I1128 13:46:44.883090 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jmfcm"] Nov 28 13:46:44 crc kubenswrapper[4857]: E1128 13:46:44.883815 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8fca0a0-cd3b-4be8-91c4-70e172de159c" containerName="collect-profiles" Nov 28 13:46:44 crc kubenswrapper[4857]: I1128 13:46:44.883832 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8fca0a0-cd3b-4be8-91c4-70e172de159c" containerName="collect-profiles" Nov 28 13:46:44 crc kubenswrapper[4857]: I1128 13:46:44.884027 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8fca0a0-cd3b-4be8-91c4-70e172de159c" containerName="collect-profiles" Nov 28 13:46:44 crc kubenswrapper[4857]: I1128 13:46:44.885246 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jmfcm" Nov 28 13:46:44 crc kubenswrapper[4857]: I1128 13:46:44.902628 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jmfcm"] Nov 28 13:46:44 crc kubenswrapper[4857]: I1128 13:46:44.991657 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/11ff23d2-23c1-403f-9eb4-fc94f58a8f87-catalog-content\") pod \"community-operators-jmfcm\" (UID: \"11ff23d2-23c1-403f-9eb4-fc94f58a8f87\") " pod="openshift-marketplace/community-operators-jmfcm" Nov 28 13:46:44 crc kubenswrapper[4857]: I1128 13:46:44.991825 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/11ff23d2-23c1-403f-9eb4-fc94f58a8f87-utilities\") pod \"community-operators-jmfcm\" (UID: \"11ff23d2-23c1-403f-9eb4-fc94f58a8f87\") " pod="openshift-marketplace/community-operators-jmfcm" Nov 28 13:46:44 crc kubenswrapper[4857]: I1128 13:46:44.991885 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btvcl\" (UniqueName: \"kubernetes.io/projected/11ff23d2-23c1-403f-9eb4-fc94f58a8f87-kube-api-access-btvcl\") pod \"community-operators-jmfcm\" (UID: \"11ff23d2-23c1-403f-9eb4-fc94f58a8f87\") " pod="openshift-marketplace/community-operators-jmfcm" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.092634 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btvcl\" (UniqueName: \"kubernetes.io/projected/11ff23d2-23c1-403f-9eb4-fc94f58a8f87-kube-api-access-btvcl\") pod \"community-operators-jmfcm\" (UID: \"11ff23d2-23c1-403f-9eb4-fc94f58a8f87\") " pod="openshift-marketplace/community-operators-jmfcm" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.092702 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/11ff23d2-23c1-403f-9eb4-fc94f58a8f87-catalog-content\") pod \"community-operators-jmfcm\" (UID: \"11ff23d2-23c1-403f-9eb4-fc94f58a8f87\") " pod="openshift-marketplace/community-operators-jmfcm" 
Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.092802 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/11ff23d2-23c1-403f-9eb4-fc94f58a8f87-utilities\") pod \"community-operators-jmfcm\" (UID: \"11ff23d2-23c1-403f-9eb4-fc94f58a8f87\") " pod="openshift-marketplace/community-operators-jmfcm" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.093342 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/11ff23d2-23c1-403f-9eb4-fc94f58a8f87-catalog-content\") pod \"community-operators-jmfcm\" (UID: \"11ff23d2-23c1-403f-9eb4-fc94f58a8f87\") " pod="openshift-marketplace/community-operators-jmfcm" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.093403 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/11ff23d2-23c1-403f-9eb4-fc94f58a8f87-utilities\") pod \"community-operators-jmfcm\" (UID: \"11ff23d2-23c1-403f-9eb4-fc94f58a8f87\") " pod="openshift-marketplace/community-operators-jmfcm" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.128536 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btvcl\" (UniqueName: \"kubernetes.io/projected/11ff23d2-23c1-403f-9eb4-fc94f58a8f87-kube-api-access-btvcl\") pod \"community-operators-jmfcm\" (UID: \"11ff23d2-23c1-403f-9eb4-fc94f58a8f87\") " pod="openshift-marketplace/community-operators-jmfcm" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.205083 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jmfcm" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.662749 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jmfcm"] Nov 28 13:46:46 crc kubenswrapper[4857]: I1128 13:46:46.507729 4857 generic.go:334] "Generic (PLEG): container finished" podID="11ff23d2-23c1-403f-9eb4-fc94f58a8f87" containerID="5fd072b45b486f3381acaeb599a707c07f740e908d71efa5ebefbaba60ba26ce" exitCode=0 Nov 28 13:46:46 crc kubenswrapper[4857]: I1128 13:46:46.507851 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jmfcm" event={"ID":"11ff23d2-23c1-403f-9eb4-fc94f58a8f87","Type":"ContainerDied","Data":"5fd072b45b486f3381acaeb599a707c07f740e908d71efa5ebefbaba60ba26ce"} Nov 28 13:46:46 crc kubenswrapper[4857]: I1128 13:46:46.508073 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jmfcm" event={"ID":"11ff23d2-23c1-403f-9eb4-fc94f58a8f87","Type":"ContainerStarted","Data":"9c22503f915740b749ef22c2a61781885e2c172556b57ff96a31ecba69b6c4bc"} Nov 28 13:46:48 crc kubenswrapper[4857]: I1128 13:46:48.529275 4857 generic.go:334] "Generic (PLEG): container finished" podID="11ff23d2-23c1-403f-9eb4-fc94f58a8f87" containerID="e7b7d2a3b041e3e4395e61dfc3f15241f96ecafa0045c8840e1040497c7db4a4" exitCode=0 Nov 28 13:46:48 crc kubenswrapper[4857]: I1128 13:46:48.529372 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jmfcm" event={"ID":"11ff23d2-23c1-403f-9eb4-fc94f58a8f87","Type":"ContainerDied","Data":"e7b7d2a3b041e3e4395e61dfc3f15241f96ecafa0045c8840e1040497c7db4a4"} Nov 28 13:46:49 crc kubenswrapper[4857]: I1128 13:46:49.542038 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-jmfcm" event={"ID":"11ff23d2-23c1-403f-9eb4-fc94f58a8f87","Type":"ContainerStarted","Data":"746d2e4b93e12736b20a89e29cecb70261b398173d824a1568d5981d30168e0a"} Nov 28 13:46:49 crc kubenswrapper[4857]: I1128 13:46:49.567314 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jmfcm" podStartSLOduration=3.081072256 podStartE2EDuration="5.567279971s" podCreationTimestamp="2025-11-28 13:46:44 +0000 UTC" firstStartedPulling="2025-11-28 13:46:46.510407889 +0000 UTC m=+1698.537783066" lastFinishedPulling="2025-11-28 13:46:48.996615604 +0000 UTC m=+1701.023990781" observedRunningTime="2025-11-28 13:46:49.562595876 +0000 UTC m=+1701.589971053" watchObservedRunningTime="2025-11-28 13:46:49.567279971 +0000 UTC m=+1701.594655178" Nov 28 13:46:51 crc kubenswrapper[4857]: I1128 13:46:51.309709 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7" Nov 28 13:46:51 crc kubenswrapper[4857]: E1128 13:46:51.310068 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" Nov 28 13:46:55 crc kubenswrapper[4857]: I1128 13:46:55.205340 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jmfcm" Nov 28 13:46:55 crc kubenswrapper[4857]: I1128 13:46:55.205949 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jmfcm" Nov 28 13:46:55 crc kubenswrapper[4857]: I1128 13:46:55.283442 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jmfcm" Nov 28 13:46:56 crc kubenswrapper[4857]: I1128 13:46:56.275729 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jmfcm" Nov 28 13:46:56 crc kubenswrapper[4857]: I1128 13:46:56.326396 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jmfcm"] Nov 28 13:46:58 crc kubenswrapper[4857]: I1128 13:46:58.235138 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jmfcm" podUID="11ff23d2-23c1-403f-9eb4-fc94f58a8f87" containerName="registry-server" containerID="cri-o://746d2e4b93e12736b20a89e29cecb70261b398173d824a1568d5981d30168e0a" gracePeriod=2 Nov 28 13:46:58 crc kubenswrapper[4857]: E1128 13:46:58.296001 4857 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod11ff23d2_23c1_403f_9eb4_fc94f58a8f87.slice/crio-746d2e4b93e12736b20a89e29cecb70261b398173d824a1568d5981d30168e0a.scope\": RecentStats: unable to find data in memory cache]" Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.238245 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jmfcm" Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.248636 4857 generic.go:334] "Generic (PLEG): container finished" podID="11ff23d2-23c1-403f-9eb4-fc94f58a8f87" containerID="746d2e4b93e12736b20a89e29cecb70261b398173d824a1568d5981d30168e0a" exitCode=0 Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.248672 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jmfcm" event={"ID":"11ff23d2-23c1-403f-9eb4-fc94f58a8f87","Type":"ContainerDied","Data":"746d2e4b93e12736b20a89e29cecb70261b398173d824a1568d5981d30168e0a"} Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.248719 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jmfcm" event={"ID":"11ff23d2-23c1-403f-9eb4-fc94f58a8f87","Type":"ContainerDied","Data":"9c22503f915740b749ef22c2a61781885e2c172556b57ff96a31ecba69b6c4bc"} Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.248739 4857 scope.go:117] "RemoveContainer" containerID="746d2e4b93e12736b20a89e29cecb70261b398173d824a1568d5981d30168e0a" Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.248688 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jmfcm" Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.272136 4857 scope.go:117] "RemoveContainer" containerID="e7b7d2a3b041e3e4395e61dfc3f15241f96ecafa0045c8840e1040497c7db4a4" Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.292684 4857 scope.go:117] "RemoveContainer" containerID="5fd072b45b486f3381acaeb599a707c07f740e908d71efa5ebefbaba60ba26ce" Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.313119 4857 scope.go:117] "RemoveContainer" containerID="746d2e4b93e12736b20a89e29cecb70261b398173d824a1568d5981d30168e0a" Nov 28 13:46:59 crc kubenswrapper[4857]: E1128 13:46:59.313527 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"746d2e4b93e12736b20a89e29cecb70261b398173d824a1568d5981d30168e0a\": container with ID starting with 746d2e4b93e12736b20a89e29cecb70261b398173d824a1568d5981d30168e0a not found: ID does not exist" containerID="746d2e4b93e12736b20a89e29cecb70261b398173d824a1568d5981d30168e0a" Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.313567 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"746d2e4b93e12736b20a89e29cecb70261b398173d824a1568d5981d30168e0a"} err="failed to get container status \"746d2e4b93e12736b20a89e29cecb70261b398173d824a1568d5981d30168e0a\": rpc error: code = NotFound desc = could not find container \"746d2e4b93e12736b20a89e29cecb70261b398173d824a1568d5981d30168e0a\": container with ID starting with 746d2e4b93e12736b20a89e29cecb70261b398173d824a1568d5981d30168e0a not found: ID does not exist" Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.313595 4857 scope.go:117] "RemoveContainer" containerID="e7b7d2a3b041e3e4395e61dfc3f15241f96ecafa0045c8840e1040497c7db4a4" Nov 28 13:46:59 crc kubenswrapper[4857]: E1128 13:46:59.314157 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7b7d2a3b041e3e4395e61dfc3f15241f96ecafa0045c8840e1040497c7db4a4\": container with ID starting with e7b7d2a3b041e3e4395e61dfc3f15241f96ecafa0045c8840e1040497c7db4a4 not found: ID does not exist" 
containerID="e7b7d2a3b041e3e4395e61dfc3f15241f96ecafa0045c8840e1040497c7db4a4" Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.314228 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7b7d2a3b041e3e4395e61dfc3f15241f96ecafa0045c8840e1040497c7db4a4"} err="failed to get container status \"e7b7d2a3b041e3e4395e61dfc3f15241f96ecafa0045c8840e1040497c7db4a4\": rpc error: code = NotFound desc = could not find container \"e7b7d2a3b041e3e4395e61dfc3f15241f96ecafa0045c8840e1040497c7db4a4\": container with ID starting with e7b7d2a3b041e3e4395e61dfc3f15241f96ecafa0045c8840e1040497c7db4a4 not found: ID does not exist" Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.314261 4857 scope.go:117] "RemoveContainer" containerID="5fd072b45b486f3381acaeb599a707c07f740e908d71efa5ebefbaba60ba26ce" Nov 28 13:46:59 crc kubenswrapper[4857]: E1128 13:46:59.314669 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5fd072b45b486f3381acaeb599a707c07f740e908d71efa5ebefbaba60ba26ce\": container with ID starting with 5fd072b45b486f3381acaeb599a707c07f740e908d71efa5ebefbaba60ba26ce not found: ID does not exist" containerID="5fd072b45b486f3381acaeb599a707c07f740e908d71efa5ebefbaba60ba26ce" Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.314692 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fd072b45b486f3381acaeb599a707c07f740e908d71efa5ebefbaba60ba26ce"} err="failed to get container status \"5fd072b45b486f3381acaeb599a707c07f740e908d71efa5ebefbaba60ba26ce\": rpc error: code = NotFound desc = could not find container \"5fd072b45b486f3381acaeb599a707c07f740e908d71efa5ebefbaba60ba26ce\": container with ID starting with 5fd072b45b486f3381acaeb599a707c07f740e908d71efa5ebefbaba60ba26ce not found: ID does not exist" Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.399321 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/11ff23d2-23c1-403f-9eb4-fc94f58a8f87-utilities\") pod \"11ff23d2-23c1-403f-9eb4-fc94f58a8f87\" (UID: \"11ff23d2-23c1-403f-9eb4-fc94f58a8f87\") " Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.399381 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-btvcl\" (UniqueName: \"kubernetes.io/projected/11ff23d2-23c1-403f-9eb4-fc94f58a8f87-kube-api-access-btvcl\") pod \"11ff23d2-23c1-403f-9eb4-fc94f58a8f87\" (UID: \"11ff23d2-23c1-403f-9eb4-fc94f58a8f87\") " Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.399422 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/11ff23d2-23c1-403f-9eb4-fc94f58a8f87-catalog-content\") pod \"11ff23d2-23c1-403f-9eb4-fc94f58a8f87\" (UID: \"11ff23d2-23c1-403f-9eb4-fc94f58a8f87\") " Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.400473 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/11ff23d2-23c1-403f-9eb4-fc94f58a8f87-utilities" (OuterVolumeSpecName: "utilities") pod "11ff23d2-23c1-403f-9eb4-fc94f58a8f87" (UID: "11ff23d2-23c1-403f-9eb4-fc94f58a8f87"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.405798 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11ff23d2-23c1-403f-9eb4-fc94f58a8f87-kube-api-access-btvcl" (OuterVolumeSpecName: "kube-api-access-btvcl") pod "11ff23d2-23c1-403f-9eb4-fc94f58a8f87" (UID: "11ff23d2-23c1-403f-9eb4-fc94f58a8f87"). InnerVolumeSpecName "kube-api-access-btvcl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.452079 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/11ff23d2-23c1-403f-9eb4-fc94f58a8f87-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "11ff23d2-23c1-403f-9eb4-fc94f58a8f87" (UID: "11ff23d2-23c1-403f-9eb4-fc94f58a8f87"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.501291 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-btvcl\" (UniqueName: \"kubernetes.io/projected/11ff23d2-23c1-403f-9eb4-fc94f58a8f87-kube-api-access-btvcl\") on node \"crc\" DevicePath \"\"" Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.501321 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/11ff23d2-23c1-403f-9eb4-fc94f58a8f87-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.501331 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/11ff23d2-23c1-403f-9eb4-fc94f58a8f87-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.589087 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jmfcm"] Nov 28 13:46:59 crc kubenswrapper[4857]: I1128 13:46:59.595064 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jmfcm"] Nov 28 13:47:00 crc kubenswrapper[4857]: I1128 13:47:00.334305 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11ff23d2-23c1-403f-9eb4-fc94f58a8f87" path="/var/lib/kubelet/pods/11ff23d2-23c1-403f-9eb4-fc94f58a8f87/volumes" Nov 28 13:47:06 crc kubenswrapper[4857]: I1128 13:47:06.309365 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7" Nov 28 13:47:06 crc kubenswrapper[4857]: E1128 13:47:06.309941 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" Nov 28 13:47:20 crc kubenswrapper[4857]: I1128 13:47:20.310143 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7" Nov 28 13:47:20 crc kubenswrapper[4857]: E1128 13:47:20.310955 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" Nov 28 13:47:28 crc kubenswrapper[4857]: I1128 13:47:28.020647 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-sxgfb/must-gather-wl5fx"] Nov 28 13:47:28 crc kubenswrapper[4857]: E1128 13:47:28.021327 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11ff23d2-23c1-403f-9eb4-fc94f58a8f87" containerName="extract-utilities" Nov 28 13:47:28 crc kubenswrapper[4857]: I1128 13:47:28.021339 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="11ff23d2-23c1-403f-9eb4-fc94f58a8f87" containerName="extract-utilities" Nov 28 13:47:28 crc kubenswrapper[4857]: E1128 13:47:28.021384 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11ff23d2-23c1-403f-9eb4-fc94f58a8f87" containerName="extract-content" Nov 28 13:47:28 crc kubenswrapper[4857]: I1128 13:47:28.021390 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="11ff23d2-23c1-403f-9eb4-fc94f58a8f87" containerName="extract-content" Nov 28 13:47:28 crc kubenswrapper[4857]: E1128 13:47:28.021411 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11ff23d2-23c1-403f-9eb4-fc94f58a8f87" containerName="registry-server" Nov 28 13:47:28 crc kubenswrapper[4857]: I1128 13:47:28.021418 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="11ff23d2-23c1-403f-9eb4-fc94f58a8f87" containerName="registry-server" Nov 28 13:47:28 crc kubenswrapper[4857]: I1128 13:47:28.021589 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="11ff23d2-23c1-403f-9eb4-fc94f58a8f87" containerName="registry-server" Nov 28 13:47:28 crc kubenswrapper[4857]: I1128 13:47:28.022305 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-sxgfb/must-gather-wl5fx" Nov 28 13:47:28 crc kubenswrapper[4857]: I1128 13:47:28.024929 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-sxgfb"/"openshift-service-ca.crt" Nov 28 13:47:28 crc kubenswrapper[4857]: I1128 13:47:28.044006 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-sxgfb"/"kube-root-ca.crt" Nov 28 13:47:28 crc kubenswrapper[4857]: I1128 13:47:28.044303 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-sxgfb"/"default-dockercfg-vvc7j" Nov 28 13:47:28 crc kubenswrapper[4857]: I1128 13:47:28.086764 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-sxgfb/must-gather-wl5fx"] Nov 28 13:47:28 crc kubenswrapper[4857]: I1128 13:47:28.183995 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qlq72\" (UniqueName: \"kubernetes.io/projected/0cd7df22-643a-4ea0-a182-9f40fca04305-kube-api-access-qlq72\") pod \"must-gather-wl5fx\" (UID: \"0cd7df22-643a-4ea0-a182-9f40fca04305\") " pod="openshift-must-gather-sxgfb/must-gather-wl5fx" Nov 28 13:47:28 crc kubenswrapper[4857]: I1128 13:47:28.184354 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/0cd7df22-643a-4ea0-a182-9f40fca04305-must-gather-output\") pod \"must-gather-wl5fx\" (UID: \"0cd7df22-643a-4ea0-a182-9f40fca04305\") " pod="openshift-must-gather-sxgfb/must-gather-wl5fx" Nov 28 13:47:28 crc kubenswrapper[4857]: I1128 13:47:28.285231 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/0cd7df22-643a-4ea0-a182-9f40fca04305-must-gather-output\") pod \"must-gather-wl5fx\" (UID: \"0cd7df22-643a-4ea0-a182-9f40fca04305\") " pod="openshift-must-gather-sxgfb/must-gather-wl5fx" Nov 28 13:47:28 crc kubenswrapper[4857]: I1128 13:47:28.285332 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlq72\" (UniqueName: \"kubernetes.io/projected/0cd7df22-643a-4ea0-a182-9f40fca04305-kube-api-access-qlq72\") pod \"must-gather-wl5fx\" (UID: \"0cd7df22-643a-4ea0-a182-9f40fca04305\") " pod="openshift-must-gather-sxgfb/must-gather-wl5fx" Nov 28 13:47:28 crc kubenswrapper[4857]: I1128 13:47:28.285814 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/0cd7df22-643a-4ea0-a182-9f40fca04305-must-gather-output\") pod \"must-gather-wl5fx\" (UID: \"0cd7df22-643a-4ea0-a182-9f40fca04305\") " pod="openshift-must-gather-sxgfb/must-gather-wl5fx" Nov 28 13:47:28 crc kubenswrapper[4857]: I1128 13:47:28.352878 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qlq72\" (UniqueName: \"kubernetes.io/projected/0cd7df22-643a-4ea0-a182-9f40fca04305-kube-api-access-qlq72\") pod \"must-gather-wl5fx\" (UID: \"0cd7df22-643a-4ea0-a182-9f40fca04305\") " pod="openshift-must-gather-sxgfb/must-gather-wl5fx" Nov 28 13:47:28 crc kubenswrapper[4857]: I1128 13:47:28.355525 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-sxgfb"/"default-dockercfg-vvc7j" Nov 28 13:47:28 crc kubenswrapper[4857]: I1128 13:47:28.363832 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-sxgfb/must-gather-wl5fx" Nov 28 13:47:28 crc kubenswrapper[4857]: I1128 13:47:28.917816 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-sxgfb/must-gather-wl5fx"] Nov 28 13:47:29 crc kubenswrapper[4857]: I1128 13:47:29.603378 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sxgfb/must-gather-wl5fx" event={"ID":"0cd7df22-643a-4ea0-a182-9f40fca04305","Type":"ContainerStarted","Data":"c2bf65a0078418183bc30fd3a270003bff3264cd075176f77c36e21b9559eafb"} Nov 28 13:47:33 crc kubenswrapper[4857]: I1128 13:47:33.309558 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7" Nov 28 13:47:33 crc kubenswrapper[4857]: E1128 13:47:33.310129 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" Nov 28 13:47:33 crc kubenswrapper[4857]: I1128 13:47:33.639899 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sxgfb/must-gather-wl5fx" event={"ID":"0cd7df22-643a-4ea0-a182-9f40fca04305","Type":"ContainerStarted","Data":"62c62ec6939c2ad08f2ff19215b2086a7083fd71f30e682e92a4da170794129a"} Nov 28 13:47:34 crc kubenswrapper[4857]: I1128 13:47:34.662941 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sxgfb/must-gather-wl5fx" event={"ID":"0cd7df22-643a-4ea0-a182-9f40fca04305","Type":"ContainerStarted","Data":"e7c0f1a2ff3cfee597f67f3bc29d77fd31490547b96129404cba033cdd4a4dd1"} Nov 28 13:47:34 crc kubenswrapper[4857]: I1128 13:47:34.688026 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-sxgfb/must-gather-wl5fx" podStartSLOduration=3.223366861 podStartE2EDuration="7.688003028s" podCreationTimestamp="2025-11-28 13:47:27 +0000 UTC" firstStartedPulling="2025-11-28 13:47:28.92598745 +0000 UTC m=+1740.953362617" lastFinishedPulling="2025-11-28 13:47:33.390623617 +0000 UTC m=+1745.417998784" observedRunningTime="2025-11-28 13:47:34.681662487 +0000 UTC m=+1746.709037664" watchObservedRunningTime="2025-11-28 13:47:34.688003028 +0000 UTC m=+1746.715378195" Nov 28 13:47:40 crc kubenswrapper[4857]: I1128 13:47:40.150017 4857 scope.go:117] "RemoveContainer" containerID="9f2936fe928f6000c1df2fe80515f9fd71cc2a258636283c70afbe2ab56dcf0b" Nov 28 13:47:40 crc kubenswrapper[4857]: I1128 13:47:40.174851 4857 scope.go:117] "RemoveContainer" containerID="6f648c5f8eb830af4905c6385054384b6b5cee1dbbb4c91beefc91f8c133206b" Nov 28 13:47:40 crc kubenswrapper[4857]: I1128 13:47:40.198043 4857 scope.go:117] "RemoveContainer" containerID="742670a443e33f75e66952e569ed4805604ac305cab57c22b54e2fa2ce2f0864" Nov 28 13:47:40 crc kubenswrapper[4857]: I1128 13:47:40.278564 4857 scope.go:117] "RemoveContainer" containerID="71ae9858cfed8aa14244777c5eeb1cd2345f201203894a017134e488b8f0b243" Nov 28 13:47:40 crc kubenswrapper[4857]: I1128 13:47:40.303125 4857 scope.go:117] "RemoveContainer" containerID="9a058d07523d9997875c4df9672d3c2618e01acbee6d4a401522e8f25cb2b82f" Nov 28 13:47:40 crc kubenswrapper[4857]: I1128 13:47:40.346048 4857 scope.go:117] "RemoveContainer" 
containerID="afe50b0f6d97ab59e3e2e2925c1ad50286d76661f30419d74579b320e2624da7" Nov 28 13:47:40 crc kubenswrapper[4857]: I1128 13:47:40.387864 4857 scope.go:117] "RemoveContainer" containerID="89da98a0c10b1faa48fafd6ba314782afe1ae3811d31a641ddb07be661fdbe5e" Nov 28 13:47:40 crc kubenswrapper[4857]: I1128 13:47:40.425131 4857 scope.go:117] "RemoveContainer" containerID="4bdd0ee5b2dc8d0eba75e5970152f8cfe9df74f09930b295ed3cf6ddb62ac999" Nov 28 13:47:40 crc kubenswrapper[4857]: I1128 13:47:40.450687 4857 scope.go:117] "RemoveContainer" containerID="b9321f2b63f3869f3d5de215cccfae2beee129e35b1ddcfa2bb4212dbb778dac" Nov 28 13:47:40 crc kubenswrapper[4857]: I1128 13:47:40.474933 4857 scope.go:117] "RemoveContainer" containerID="921da2286c74b9205a4963fadea18299c07583052be029c357bcd68f1c378c4d" Nov 28 13:47:40 crc kubenswrapper[4857]: I1128 13:47:40.495335 4857 scope.go:117] "RemoveContainer" containerID="7f1eabd058b1d022ba7f7cbccd8b90653ba66843bcbc94ab126462d61013e688" Nov 28 13:47:40 crc kubenswrapper[4857]: I1128 13:47:40.511292 4857 scope.go:117] "RemoveContainer" containerID="88fa49c3f289e73d676dc73cf57ee9db5632204f69f9b2c03c7616194fb7346f" Nov 28 13:47:40 crc kubenswrapper[4857]: I1128 13:47:40.527676 4857 scope.go:117] "RemoveContainer" containerID="442c14282bda41626b88d6fa475b7be9e797bd11222890dc05a9fb4d318de55f" Nov 28 13:47:40 crc kubenswrapper[4857]: I1128 13:47:40.547064 4857 scope.go:117] "RemoveContainer" containerID="43803a839e238f33736b9e11cf3a9902b5274095c543dca6a4d5d4938e97537d" Nov 28 13:47:48 crc kubenswrapper[4857]: I1128 13:47:48.321856 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7" Nov 28 13:47:48 crc kubenswrapper[4857]: E1128 13:47:48.323100 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" Nov 28 13:48:03 crc kubenswrapper[4857]: I1128 13:48:03.309268 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7" Nov 28 13:48:03 crc kubenswrapper[4857]: E1128 13:48:03.309944 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" Nov 28 13:48:15 crc kubenswrapper[4857]: I1128 13:48:15.310524 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7" Nov 28 13:48:15 crc kubenswrapper[4857]: E1128 13:48:15.311813 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" Nov 28 13:48:27 crc 
kubenswrapper[4857]: I1128 13:48:27.309738 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7" Nov 28 13:48:27 crc kubenswrapper[4857]: E1128 13:48:27.310511 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" Nov 28 13:48:31 crc kubenswrapper[4857]: I1128 13:48:31.331993 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw_9af5d950-6280-44db-85a8-91e7172b9d51/util/0.log" Nov 28 13:48:31 crc kubenswrapper[4857]: I1128 13:48:31.472439 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw_9af5d950-6280-44db-85a8-91e7172b9d51/util/0.log" Nov 28 13:48:31 crc kubenswrapper[4857]: I1128 13:48:31.490731 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw_9af5d950-6280-44db-85a8-91e7172b9d51/pull/0.log" Nov 28 13:48:31 crc kubenswrapper[4857]: I1128 13:48:31.535992 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw_9af5d950-6280-44db-85a8-91e7172b9d51/pull/0.log" Nov 28 13:48:31 crc kubenswrapper[4857]: I1128 13:48:31.801941 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw_9af5d950-6280-44db-85a8-91e7172b9d51/extract/0.log" Nov 28 13:48:31 crc kubenswrapper[4857]: I1128 13:48:31.834373 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw_9af5d950-6280-44db-85a8-91e7172b9d51/util/0.log" Nov 28 13:48:31 crc kubenswrapper[4857]: I1128 13:48:31.869456 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_5289a541d177202ae07bd3f425b86f495f0f333c3beef5f446267b0d14zz6fw_9af5d950-6280-44db-85a8-91e7172b9d51/pull/0.log" Nov 28 13:48:32 crc kubenswrapper[4857]: I1128 13:48:32.000227 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-msp7g_8ba8130c-f7e1-4cc0-8427-5a13997138ce/kube-rbac-proxy/0.log" Nov 28 13:48:32 crc kubenswrapper[4857]: I1128 13:48:32.063258 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-msp7g_8ba8130c-f7e1-4cc0-8427-5a13997138ce/manager/0.log" Nov 28 13:48:32 crc kubenswrapper[4857]: I1128 13:48:32.121551 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-fp6fc_41a3b8e7-e61b-45fc-a87e-99e2d943fd15/kube-rbac-proxy/0.log" Nov 28 13:48:32 crc kubenswrapper[4857]: I1128 13:48:32.277118 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-fr6n9_42e0b127-fc00-42c7-a7d8-9e5ea55a6590/kube-rbac-proxy/0.log" Nov 28 13:48:32 crc kubenswrapper[4857]: 
I1128 13:48:32.283560 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-fp6fc_41a3b8e7-e61b-45fc-a87e-99e2d943fd15/manager/0.log" Nov 28 13:48:32 crc kubenswrapper[4857]: I1128 13:48:32.388658 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-fr6n9_42e0b127-fc00-42c7-a7d8-9e5ea55a6590/manager/0.log" Nov 28 13:48:32 crc kubenswrapper[4857]: I1128 13:48:32.496055 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-cwbkd_b35bb4aa-b164-47fe-85bd-8f34b7e55e5e/kube-rbac-proxy/0.log" Nov 28 13:48:32 crc kubenswrapper[4857]: I1128 13:48:32.564003 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-cwbkd_b35bb4aa-b164-47fe-85bd-8f34b7e55e5e/manager/0.log" Nov 28 13:48:32 crc kubenswrapper[4857]: I1128 13:48:32.692976 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-wkfb4_e4010942-0dec-4e3e-8f52-f69abf7ace10/kube-rbac-proxy/0.log" Nov 28 13:48:32 crc kubenswrapper[4857]: I1128 13:48:32.742028 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-wkfb4_e4010942-0dec-4e3e-8f52-f69abf7ace10/manager/0.log" Nov 28 13:48:32 crc kubenswrapper[4857]: I1128 13:48:32.857582 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-4gn9q_3c9f0811-92a3-4681-b71e-28d474c3751e/kube-rbac-proxy/0.log" Nov 28 13:48:32 crc kubenswrapper[4857]: I1128 13:48:32.929233 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-4gn9q_3c9f0811-92a3-4681-b71e-28d474c3751e/manager/0.log" Nov 28 13:48:33 crc kubenswrapper[4857]: I1128 13:48:33.159198 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-qb9wz_d437c518-be55-44c0-b374-5c3d2d62b49a/kube-rbac-proxy/0.log" Nov 28 13:48:33 crc kubenswrapper[4857]: I1128 13:48:33.369568 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-qb9wz_d437c518-be55-44c0-b374-5c3d2d62b49a/manager/0.log" Nov 28 13:48:33 crc kubenswrapper[4857]: I1128 13:48:33.474032 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-hg5f2_cb4c2469-9178-463d-a9be-700af973c9b8/kube-rbac-proxy/0.log" Nov 28 13:48:33 crc kubenswrapper[4857]: I1128 13:48:33.506117 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-hg5f2_cb4c2469-9178-463d-a9be-700af973c9b8/manager/0.log" Nov 28 13:48:33 crc kubenswrapper[4857]: I1128 13:48:33.609638 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-r9w5w_49f7d7ee-64a4-4ce9-91ec-a76be0cdd249/kube-rbac-proxy/0.log" Nov 28 13:48:33 crc kubenswrapper[4857]: I1128 13:48:33.725847 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-r9w5w_49f7d7ee-64a4-4ce9-91ec-a76be0cdd249/manager/0.log" Nov 28 13:48:33 crc 
kubenswrapper[4857]: I1128 13:48:33.866105 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-4wtvs_502d3010-ae32-4e6f-a7bf-614cd1da9dda/manager/0.log" Nov 28 13:48:33 crc kubenswrapper[4857]: I1128 13:48:33.866600 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-4wtvs_502d3010-ae32-4e6f-a7bf-614cd1da9dda/kube-rbac-proxy/0.log" Nov 28 13:48:33 crc kubenswrapper[4857]: I1128 13:48:33.940284 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-x4dfd_968d6179-1a75-405c-97cc-cad775d59e28/kube-rbac-proxy/0.log" Nov 28 13:48:34 crc kubenswrapper[4857]: I1128 13:48:34.134759 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-x4dfd_968d6179-1a75-405c-97cc-cad775d59e28/manager/0.log" Nov 28 13:48:34 crc kubenswrapper[4857]: I1128 13:48:34.193526 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-c9dqn_a4e0aa4d-510c-4880-84fd-998e7527e41d/kube-rbac-proxy/0.log" Nov 28 13:48:34 crc kubenswrapper[4857]: I1128 13:48:34.291217 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-c9dqn_a4e0aa4d-510c-4880-84fd-998e7527e41d/manager/0.log" Nov 28 13:48:34 crc kubenswrapper[4857]: I1128 13:48:34.396160 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-nv4bx_4f45a249-50b4-466c-a54b-9205e5a127e7/kube-rbac-proxy/0.log" Nov 28 13:48:34 crc kubenswrapper[4857]: I1128 13:48:34.558098 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-nv4bx_4f45a249-50b4-466c-a54b-9205e5a127e7/manager/0.log" Nov 28 13:48:34 crc kubenswrapper[4857]: I1128 13:48:34.634765 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-zzqvn_78748478-834d-4797-b214-b72206253e23/kube-rbac-proxy/0.log" Nov 28 13:48:34 crc kubenswrapper[4857]: I1128 13:48:34.672994 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-zzqvn_78748478-834d-4797-b214-b72206253e23/manager/0.log" Nov 28 13:48:34 crc kubenswrapper[4857]: I1128 13:48:34.815364 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm_f39d1519-87df-476d-b47a-8b2857c23843/kube-rbac-proxy/0.log" Nov 28 13:48:34 crc kubenswrapper[4857]: I1128 13:48:34.833716 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5fcdb54b6bdmqkm_f39d1519-87df-476d-b47a-8b2857c23843/manager/0.log" Nov 28 13:48:35 crc kubenswrapper[4857]: I1128 13:48:35.188907 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-ffb7b6fb5-rn794_e8cd6acf-ae30-4fe6-bb1e-7075351b6306/operator/0.log" Nov 28 13:48:35 crc kubenswrapper[4857]: I1128 13:48:35.396290 4857 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_openstack-operator-index-9b28s_2f9dbaf9-124d-4667-9744-afd253fd2c68/registry-server/0.log" Nov 28 13:48:35 crc kubenswrapper[4857]: I1128 13:48:35.510205 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-7lsxw_faeedd17-af99-4393-bf5c-ac5cc3b2d7b5/kube-rbac-proxy/0.log" Nov 28 13:48:35 crc kubenswrapper[4857]: I1128 13:48:35.700913 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-7lsxw_faeedd17-af99-4393-bf5c-ac5cc3b2d7b5/manager/0.log" Nov 28 13:48:35 crc kubenswrapper[4857]: I1128 13:48:35.718936 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-ngcdx_90c064f2-ec25-44c8-ab5a-17fdb307cfe6/kube-rbac-proxy/0.log" Nov 28 13:48:35 crc kubenswrapper[4857]: I1128 13:48:35.801134 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-ngcdx_90c064f2-ec25-44c8-ab5a-17fdb307cfe6/manager/0.log" Nov 28 13:48:35 crc kubenswrapper[4857]: I1128 13:48:35.813399 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-888bbc64f-m879m_340e937e-6fd3-4fd4-829e-2ac5972542b7/manager/0.log" Nov 28 13:48:35 crc kubenswrapper[4857]: I1128 13:48:35.929400 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-b9nj2_5d82c76b-a0e1-4001-8676-390818e9edaf/operator/0.log" Nov 28 13:48:35 crc kubenswrapper[4857]: I1128 13:48:35.984194 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-64gmq_729a83b9-4e01-4943-abed-58960ed40e68/kube-rbac-proxy/0.log" Nov 28 13:48:36 crc kubenswrapper[4857]: I1128 13:48:36.080635 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-64gmq_729a83b9-4e01-4943-abed-58960ed40e68/manager/0.log" Nov 28 13:48:36 crc kubenswrapper[4857]: I1128 13:48:36.143831 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-nwrtk_de166f90-9b9c-49b3-b12b-0e36ae5db4da/kube-rbac-proxy/0.log" Nov 28 13:48:36 crc kubenswrapper[4857]: I1128 13:48:36.236442 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-nwrtk_de166f90-9b9c-49b3-b12b-0e36ae5db4da/manager/0.log" Nov 28 13:48:36 crc kubenswrapper[4857]: I1128 13:48:36.273948 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-dlmmw_af0c9c82-73f4-4bff-b0f6-a94c0d6e731a/kube-rbac-proxy/0.log" Nov 28 13:48:36 crc kubenswrapper[4857]: I1128 13:48:36.335009 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-dlmmw_af0c9c82-73f4-4bff-b0f6-a94c0d6e731a/manager/0.log" Nov 28 13:48:36 crc kubenswrapper[4857]: I1128 13:48:36.420399 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-ggrfl_5c39c627-3379-4971-8d55-48bede6d34ec/manager/0.log" Nov 28 13:48:36 crc kubenswrapper[4857]: I1128 13:48:36.430811 4857 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-ggrfl_5c39c627-3379-4971-8d55-48bede6d34ec/kube-rbac-proxy/0.log" Nov 28 13:48:38 crc kubenswrapper[4857]: I1128 13:48:38.313376 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7" Nov 28 13:48:38 crc kubenswrapper[4857]: E1128 13:48:38.314203 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" Nov 28 13:48:40 crc kubenswrapper[4857]: I1128 13:48:40.755944 4857 scope.go:117] "RemoveContainer" containerID="f002b9da5d304a77fca31416dd0c123765cbee7f0c7495a3f41715d72eeaf82b" Nov 28 13:48:40 crc kubenswrapper[4857]: I1128 13:48:40.800441 4857 scope.go:117] "RemoveContainer" containerID="c8b37b179bae98a24592b60f7c52271b9cd46845ca9bc9a901e30860b3b46753" Nov 28 13:48:40 crc kubenswrapper[4857]: I1128 13:48:40.838486 4857 scope.go:117] "RemoveContainer" containerID="776ea66e8f82f77c0554d725c1d8158421a9ae7c37c383b7b900b848ca6121a5" Nov 28 13:48:50 crc kubenswrapper[4857]: I1128 13:48:50.309379 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7" Nov 28 13:48:50 crc kubenswrapper[4857]: E1128 13:48:50.310938 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" Nov 28 13:48:54 crc kubenswrapper[4857]: I1128 13:48:54.735146 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-mfb8j_75c61d4f-c7df-4f0f-b643-6bde1458075a/control-plane-machine-set-operator/0.log" Nov 28 13:48:54 crc kubenswrapper[4857]: I1128 13:48:54.901159 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-pxdz7_eba66557-699a-4be9-bc8e-fcedf6155f7e/kube-rbac-proxy/0.log" Nov 28 13:48:54 crc kubenswrapper[4857]: I1128 13:48:54.933025 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-pxdz7_eba66557-699a-4be9-bc8e-fcedf6155f7e/machine-api-operator/0.log" Nov 28 13:49:04 crc kubenswrapper[4857]: I1128 13:49:04.310307 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7" Nov 28 13:49:04 crc kubenswrapper[4857]: E1128 13:49:04.311272 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" Nov 28 13:49:07 crc kubenswrapper[4857]: I1128 13:49:07.193053 4857 
log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-pp979_0726d67c-02a4-48e6-b815-9f0d8c567c1a/cert-manager-controller/0.log" Nov 28 13:49:07 crc kubenswrapper[4857]: I1128 13:49:07.369431 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-cvgnl_28d034d3-c255-40c7-b019-6b71be38c34f/cert-manager-cainjector/0.log" Nov 28 13:49:07 crc kubenswrapper[4857]: I1128 13:49:07.386093 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-rdxp4_95fc4b68-c383-4f0c-929d-0bf89f13183b/cert-manager-webhook/0.log" Nov 28 13:49:17 crc kubenswrapper[4857]: I1128 13:49:17.310241 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7" Nov 28 13:49:17 crc kubenswrapper[4857]: E1128 13:49:17.311092 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" Nov 28 13:49:20 crc kubenswrapper[4857]: I1128 13:49:20.000140 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7fbb5f6569-ht4fl_3528f191-ca62-461c-a93f-8fb8758d76af/nmstate-console-plugin/0.log" Nov 28 13:49:20 crc kubenswrapper[4857]: I1128 13:49:20.204357 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-njxfb_146db7a4-f901-4160-8053-7fe4f8063e22/nmstate-handler/0.log" Nov 28 13:49:20 crc kubenswrapper[4857]: I1128 13:49:20.247330 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-qlhzf_07c3f6d4-5b62-4140-a9d8-a5f26841a487/kube-rbac-proxy/0.log" Nov 28 13:49:20 crc kubenswrapper[4857]: I1128 13:49:20.285618 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-qlhzf_07c3f6d4-5b62-4140-a9d8-a5f26841a487/nmstate-metrics/0.log" Nov 28 13:49:20 crc kubenswrapper[4857]: I1128 13:49:20.473087 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5b5b58f5c8-ds5zt_fcfc3e02-2c67-4436-9719-fc3fe00bb2e0/nmstate-operator/0.log" Nov 28 13:49:20 crc kubenswrapper[4857]: I1128 13:49:20.489473 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-5f6d4c5ccb-xchwf_8aad4947-867d-42d1-af5d-d9d8284d21e7/nmstate-webhook/0.log" Nov 28 13:49:32 crc kubenswrapper[4857]: I1128 13:49:32.309878 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7" Nov 28 13:49:32 crc kubenswrapper[4857]: E1128 13:49:32.310817 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jdgls_openshift-machine-config-operator(aba2e99a-c0de-4ae5-b347-de1565fd9d68)\"" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.613142 4857 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_controller-f8648f98b-sp9cd_8e85b575-3f41-459a-b987-663c3fc8cd4c/kube-rbac-proxy/0.log" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.919335 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-m7fq9_981ad778-03ff-4a46-b35e-f670fe146521/cp-frr-files/0.log" Nov 28 13:49:36 crc kubenswrapper[4857]: I1128 13:49:36.029428 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-sp9cd_8e85b575-3f41-459a-b987-663c3fc8cd4c/controller/0.log" Nov 28 13:49:36 crc kubenswrapper[4857]: I1128 13:49:36.131265 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-m7fq9_981ad778-03ff-4a46-b35e-f670fe146521/cp-frr-files/0.log" Nov 28 13:49:36 crc kubenswrapper[4857]: I1128 13:49:36.253429 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-m7fq9_981ad778-03ff-4a46-b35e-f670fe146521/cp-metrics/0.log" Nov 28 13:49:36 crc kubenswrapper[4857]: I1128 13:49:36.282890 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-m7fq9_981ad778-03ff-4a46-b35e-f670fe146521/cp-reloader/0.log" Nov 28 13:49:36 crc kubenswrapper[4857]: I1128 13:49:36.320401 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-m7fq9_981ad778-03ff-4a46-b35e-f670fe146521/cp-reloader/0.log" Nov 28 13:49:36 crc kubenswrapper[4857]: I1128 13:49:36.499489 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-m7fq9_981ad778-03ff-4a46-b35e-f670fe146521/cp-frr-files/0.log" Nov 28 13:49:36 crc kubenswrapper[4857]: I1128 13:49:36.499511 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-m7fq9_981ad778-03ff-4a46-b35e-f670fe146521/cp-metrics/0.log" Nov 28 13:49:36 crc kubenswrapper[4857]: I1128 13:49:36.524058 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-m7fq9_981ad778-03ff-4a46-b35e-f670fe146521/cp-reloader/0.log" Nov 28 13:49:36 crc kubenswrapper[4857]: I1128 13:49:36.525125 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-m7fq9_981ad778-03ff-4a46-b35e-f670fe146521/cp-metrics/0.log" Nov 28 13:49:36 crc kubenswrapper[4857]: I1128 13:49:36.684170 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-m7fq9_981ad778-03ff-4a46-b35e-f670fe146521/cp-reloader/0.log" Nov 28 13:49:36 crc kubenswrapper[4857]: I1128 13:49:36.696635 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-m7fq9_981ad778-03ff-4a46-b35e-f670fe146521/cp-frr-files/0.log" Nov 28 13:49:36 crc kubenswrapper[4857]: I1128 13:49:36.703356 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-m7fq9_981ad778-03ff-4a46-b35e-f670fe146521/cp-metrics/0.log" Nov 28 13:49:36 crc kubenswrapper[4857]: I1128 13:49:36.715093 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-m7fq9_981ad778-03ff-4a46-b35e-f670fe146521/controller/0.log" Nov 28 13:49:36 crc kubenswrapper[4857]: I1128 13:49:36.914022 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-m7fq9_981ad778-03ff-4a46-b35e-f670fe146521/frr-metrics/0.log" Nov 28 13:49:36 crc kubenswrapper[4857]: I1128 13:49:36.938556 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-m7fq9_981ad778-03ff-4a46-b35e-f670fe146521/kube-rbac-proxy-frr/0.log" Nov 28 13:49:36 crc 
kubenswrapper[4857]: I1128 13:49:36.956365 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-m7fq9_981ad778-03ff-4a46-b35e-f670fe146521/kube-rbac-proxy/0.log" Nov 28 13:49:37 crc kubenswrapper[4857]: I1128 13:49:37.137144 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-m7fq9_981ad778-03ff-4a46-b35e-f670fe146521/reloader/0.log" Nov 28 13:49:37 crc kubenswrapper[4857]: I1128 13:49:37.168828 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7fcb986d4-nkjkb_0904931a-1033-4fd7-a34e-6a30ced4ec31/frr-k8s-webhook-server/0.log" Nov 28 13:49:37 crc kubenswrapper[4857]: I1128 13:49:37.338327 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-8475cb8447-qrwdp_4c4fc2c2-42ae-4899-b718-b86cbde512ab/manager/0.log" Nov 28 13:49:37 crc kubenswrapper[4857]: I1128 13:49:37.628239 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-77fd5fc7fd-q549n_65b602be-5783-491e-b2e6-f8abd937820e/webhook-server/0.log" Nov 28 13:49:37 crc kubenswrapper[4857]: I1128 13:49:37.826135 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-xhclh_fcc84880-b152-4494-b739-40d81a896a41/kube-rbac-proxy/0.log" Nov 28 13:49:38 crc kubenswrapper[4857]: I1128 13:49:38.080336 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-m7fq9_981ad778-03ff-4a46-b35e-f670fe146521/frr/0.log" Nov 28 13:49:38 crc kubenswrapper[4857]: I1128 13:49:38.204573 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-xhclh_fcc84880-b152-4494-b739-40d81a896a41/speaker/0.log" Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.934825 4857 scope.go:117] "RemoveContainer" containerID="64b64c3a1efcb9350b410451f1a084657b11967bf78371a018aa5e7c9647b4ed" Nov 28 13:49:43 crc kubenswrapper[4857]: I1128 13:49:43.310449 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7" Nov 28 13:49:43 crc kubenswrapper[4857]: I1128 13:49:43.971447 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" event={"ID":"aba2e99a-c0de-4ae5-b347-de1565fd9d68","Type":"ContainerStarted","Data":"7006ae030a773cc835d219f2a3366ff0efca75395f6a2dec2038ba3bfe337ea8"} Nov 28 13:49:51 crc kubenswrapper[4857]: I1128 13:49:51.277453 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6_dca407e9-a877-483e-82f3-1c1288b63d52/util/0.log" Nov 28 13:49:51 crc kubenswrapper[4857]: I1128 13:49:51.426710 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6_dca407e9-a877-483e-82f3-1c1288b63d52/util/0.log" Nov 28 13:49:51 crc kubenswrapper[4857]: I1128 13:49:51.446021 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6_dca407e9-a877-483e-82f3-1c1288b63d52/pull/0.log" Nov 28 13:49:51 crc kubenswrapper[4857]: I1128 13:49:51.489021 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6_dca407e9-a877-483e-82f3-1c1288b63d52/pull/0.log" Nov 28 13:49:51 crc 
kubenswrapper[4857]: I1128 13:49:51.639645 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6_dca407e9-a877-483e-82f3-1c1288b63d52/extract/0.log" Nov 28 13:49:51 crc kubenswrapper[4857]: I1128 13:49:51.647206 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6_dca407e9-a877-483e-82f3-1c1288b63d52/util/0.log" Nov 28 13:49:51 crc kubenswrapper[4857]: I1128 13:49:51.668492 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931agstn6_dca407e9-a877-483e-82f3-1c1288b63d52/pull/0.log" Nov 28 13:49:51 crc kubenswrapper[4857]: I1128 13:49:51.806232 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2_2b673ee4-31c9-4a17-a188-5aa63017fcf7/util/0.log" Nov 28 13:49:51 crc kubenswrapper[4857]: I1128 13:49:51.991636 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2_2b673ee4-31c9-4a17-a188-5aa63017fcf7/pull/0.log" Nov 28 13:49:51 crc kubenswrapper[4857]: I1128 13:49:51.999329 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2_2b673ee4-31c9-4a17-a188-5aa63017fcf7/util/0.log" Nov 28 13:49:52 crc kubenswrapper[4857]: I1128 13:49:52.022839 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2_2b673ee4-31c9-4a17-a188-5aa63017fcf7/pull/0.log" Nov 28 13:49:52 crc kubenswrapper[4857]: I1128 13:49:52.168490 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2_2b673ee4-31c9-4a17-a188-5aa63017fcf7/util/0.log" Nov 28 13:49:52 crc kubenswrapper[4857]: I1128 13:49:52.227417 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2_2b673ee4-31c9-4a17-a188-5aa63017fcf7/extract/0.log" Nov 28 13:49:52 crc kubenswrapper[4857]: I1128 13:49:52.394618 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s_28fef5c0-5b1c-4bc3-a288-6268042fe12c/util/0.log" Nov 28 13:49:52 crc kubenswrapper[4857]: I1128 13:49:52.417109 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fd89c2_2b673ee4-31c9-4a17-a188-5aa63017fcf7/pull/0.log" Nov 28 13:49:52 crc kubenswrapper[4857]: I1128 13:49:52.569137 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s_28fef5c0-5b1c-4bc3-a288-6268042fe12c/pull/0.log" Nov 28 13:49:52 crc kubenswrapper[4857]: I1128 13:49:52.583828 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s_28fef5c0-5b1c-4bc3-a288-6268042fe12c/util/0.log" Nov 28 13:49:52 crc kubenswrapper[4857]: I1128 13:49:52.595147 4857 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s_28fef5c0-5b1c-4bc3-a288-6268042fe12c/pull/0.log" Nov 28 13:49:52 crc kubenswrapper[4857]: I1128 13:49:52.902073 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s_28fef5c0-5b1c-4bc3-a288-6268042fe12c/pull/0.log" Nov 28 13:49:52 crc kubenswrapper[4857]: I1128 13:49:52.904955 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s_28fef5c0-5b1c-4bc3-a288-6268042fe12c/extract/0.log" Nov 28 13:49:52 crc kubenswrapper[4857]: I1128 13:49:52.910380 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83bng6s_28fef5c0-5b1c-4bc3-a288-6268042fe12c/util/0.log" Nov 28 13:49:53 crc kubenswrapper[4857]: I1128 13:49:53.020453 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-cl69z_0178b5af-6910-4201-99c0-0053310327a0/extract-utilities/0.log" Nov 28 13:49:53 crc kubenswrapper[4857]: I1128 13:49:53.150217 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-cl69z_0178b5af-6910-4201-99c0-0053310327a0/extract-utilities/0.log" Nov 28 13:49:53 crc kubenswrapper[4857]: I1128 13:49:53.178874 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-cl69z_0178b5af-6910-4201-99c0-0053310327a0/extract-content/0.log" Nov 28 13:49:53 crc kubenswrapper[4857]: I1128 13:49:53.181204 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-cl69z_0178b5af-6910-4201-99c0-0053310327a0/extract-content/0.log" Nov 28 13:49:53 crc kubenswrapper[4857]: I1128 13:49:53.352670 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-cl69z_0178b5af-6910-4201-99c0-0053310327a0/extract-utilities/0.log" Nov 28 13:49:53 crc kubenswrapper[4857]: I1128 13:49:53.360099 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-cl69z_0178b5af-6910-4201-99c0-0053310327a0/extract-content/0.log" Nov 28 13:49:53 crc kubenswrapper[4857]: I1128 13:49:53.604478 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-hjkx5_230a73ed-8a25-4d36-aebb-47f12ad15d7d/extract-utilities/0.log" Nov 28 13:49:53 crc kubenswrapper[4857]: I1128 13:49:53.665063 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-cl69z_0178b5af-6910-4201-99c0-0053310327a0/registry-server/0.log" Nov 28 13:49:53 crc kubenswrapper[4857]: I1128 13:49:53.790406 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-hjkx5_230a73ed-8a25-4d36-aebb-47f12ad15d7d/extract-utilities/0.log" Nov 28 13:49:53 crc kubenswrapper[4857]: I1128 13:49:53.796702 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-hjkx5_230a73ed-8a25-4d36-aebb-47f12ad15d7d/extract-content/0.log" Nov 28 13:49:53 crc kubenswrapper[4857]: I1128 13:49:53.802858 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-hjkx5_230a73ed-8a25-4d36-aebb-47f12ad15d7d/extract-content/0.log" Nov 28 13:49:53 
crc kubenswrapper[4857]: I1128 13:49:53.961875 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-hjkx5_230a73ed-8a25-4d36-aebb-47f12ad15d7d/extract-utilities/0.log" Nov 28 13:49:53 crc kubenswrapper[4857]: I1128 13:49:53.972258 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-hjkx5_230a73ed-8a25-4d36-aebb-47f12ad15d7d/extract-content/0.log" Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.191391 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-bhdpz_89b4b3ec-4394-4e95-9877-330c0613be93/marketplace-operator/0.log" Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.249366 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-hjkx5_230a73ed-8a25-4d36-aebb-47f12ad15d7d/registry-server/0.log" Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.263375 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-2pltp_87a26cb0-0ac0-44ca-8941-943e7e2bb155/extract-utilities/0.log" Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.440344 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-2pltp_87a26cb0-0ac0-44ca-8941-943e7e2bb155/extract-content/0.log" Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.459892 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-2pltp_87a26cb0-0ac0-44ca-8941-943e7e2bb155/extract-utilities/0.log" Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.492710 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-2pltp_87a26cb0-0ac0-44ca-8941-943e7e2bb155/extract-content/0.log" Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.658915 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-2pltp_87a26cb0-0ac0-44ca-8941-943e7e2bb155/extract-content/0.log" Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.681773 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-2pltp_87a26cb0-0ac0-44ca-8941-943e7e2bb155/extract-utilities/0.log" Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.736849 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-2pltp_87a26cb0-0ac0-44ca-8941-943e7e2bb155/registry-server/0.log" Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.774567 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-s4dd7_34b5fe72-977e-444a-94ab-5a135d1a3417/extract-utilities/0.log" Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.929117 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-s4dd7_34b5fe72-977e-444a-94ab-5a135d1a3417/extract-utilities/0.log" Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.952961 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-s4dd7_34b5fe72-977e-444a-94ab-5a135d1a3417/extract-content/0.log" Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.953936 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-s4dd7_34b5fe72-977e-444a-94ab-5a135d1a3417/extract-content/0.log" Nov 28 13:49:55 crc 
kubenswrapper[4857]: I1128 13:49:55.087727 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-s4dd7_34b5fe72-977e-444a-94ab-5a135d1a3417/extract-content/0.log" Nov 28 13:49:55 crc kubenswrapper[4857]: I1128 13:49:55.124492 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-s4dd7_34b5fe72-977e-444a-94ab-5a135d1a3417/extract-utilities/0.log" Nov 28 13:49:55 crc kubenswrapper[4857]: I1128 13:49:55.319392 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-s4dd7_34b5fe72-977e-444a-94ab-5a135d1a3417/registry-server/0.log" Nov 28 13:50:41 crc kubenswrapper[4857]: I1128 13:50:41.040810 4857 scope.go:117] "RemoveContainer" containerID="61e85e27a22224665fbae14e722d6be8aeebc908a627e8c7f4e2f1f22deb8c46" Nov 28 13:50:41 crc kubenswrapper[4857]: I1128 13:50:41.072415 4857 scope.go:117] "RemoveContainer" containerID="12e15364826b54c9d3daec1b157d14810f19e37315fa2290659bf5d554e0e354" Nov 28 13:50:41 crc kubenswrapper[4857]: I1128 13:50:41.131522 4857 scope.go:117] "RemoveContainer" containerID="0bc4ed5492f1aa664fc5c8617d9474c0b50cc73e3dee3adca82c42e83d55f771" Nov 28 13:51:00 crc kubenswrapper[4857]: I1128 13:51:00.701784 4857 generic.go:334] "Generic (PLEG): container finished" podID="0cd7df22-643a-4ea0-a182-9f40fca04305" containerID="62c62ec6939c2ad08f2ff19215b2086a7083fd71f30e682e92a4da170794129a" exitCode=0 Nov 28 13:51:00 crc kubenswrapper[4857]: I1128 13:51:00.701833 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sxgfb/must-gather-wl5fx" event={"ID":"0cd7df22-643a-4ea0-a182-9f40fca04305","Type":"ContainerDied","Data":"62c62ec6939c2ad08f2ff19215b2086a7083fd71f30e682e92a4da170794129a"} Nov 28 13:51:00 crc kubenswrapper[4857]: I1128 13:51:00.703226 4857 scope.go:117] "RemoveContainer" containerID="62c62ec6939c2ad08f2ff19215b2086a7083fd71f30e682e92a4da170794129a" Nov 28 13:51:01 crc kubenswrapper[4857]: I1128 13:51:01.006996 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-sxgfb_must-gather-wl5fx_0cd7df22-643a-4ea0-a182-9f40fca04305/gather/0.log" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.932496 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-sxgfb/must-gather-wl5fx"] Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.933220 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-sxgfb/must-gather-wl5fx" podUID="0cd7df22-643a-4ea0-a182-9f40fca04305" containerName="copy" containerID="cri-o://e7c0f1a2ff3cfee597f67f3bc29d77fd31490547b96129404cba033cdd4a4dd1" gracePeriod=2 Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.940421 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-sxgfb/must-gather-wl5fx"] Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.293074 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-sxgfb_must-gather-wl5fx_0cd7df22-643a-4ea0-a182-9f40fca04305/copy/0.log" Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.293934 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-sxgfb/must-gather-wl5fx" Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.367530 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/0cd7df22-643a-4ea0-a182-9f40fca04305-must-gather-output\") pod \"0cd7df22-643a-4ea0-a182-9f40fca04305\" (UID: \"0cd7df22-643a-4ea0-a182-9f40fca04305\") " Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.367568 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qlq72\" (UniqueName: \"kubernetes.io/projected/0cd7df22-643a-4ea0-a182-9f40fca04305-kube-api-access-qlq72\") pod \"0cd7df22-643a-4ea0-a182-9f40fca04305\" (UID: \"0cd7df22-643a-4ea0-a182-9f40fca04305\") " Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.373317 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0cd7df22-643a-4ea0-a182-9f40fca04305-kube-api-access-qlq72" (OuterVolumeSpecName: "kube-api-access-qlq72") pod "0cd7df22-643a-4ea0-a182-9f40fca04305" (UID: "0cd7df22-643a-4ea0-a182-9f40fca04305"). InnerVolumeSpecName "kube-api-access-qlq72". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.454920 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0cd7df22-643a-4ea0-a182-9f40fca04305-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "0cd7df22-643a-4ea0-a182-9f40fca04305" (UID: "0cd7df22-643a-4ea0-a182-9f40fca04305"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.469380 4857 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/0cd7df22-643a-4ea0-a182-9f40fca04305-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.469429 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qlq72\" (UniqueName: \"kubernetes.io/projected/0cd7df22-643a-4ea0-a182-9f40fca04305-kube-api-access-qlq72\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.771401 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-sxgfb_must-gather-wl5fx_0cd7df22-643a-4ea0-a182-9f40fca04305/copy/0.log" Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.772160 4857 generic.go:334] "Generic (PLEG): container finished" podID="0cd7df22-643a-4ea0-a182-9f40fca04305" containerID="e7c0f1a2ff3cfee597f67f3bc29d77fd31490547b96129404cba033cdd4a4dd1" exitCode=143 Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.772186 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-sxgfb/must-gather-wl5fx"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.772217 4857 scope.go:117] "RemoveContainer" containerID="e7c0f1a2ff3cfee597f67f3bc29d77fd31490547b96129404cba033cdd4a4dd1"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.796387 4857 scope.go:117] "RemoveContainer" containerID="62c62ec6939c2ad08f2ff19215b2086a7083fd71f30e682e92a4da170794129a"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.860714 4857 scope.go:117] "RemoveContainer" containerID="e7c0f1a2ff3cfee597f67f3bc29d77fd31490547b96129404cba033cdd4a4dd1"
Nov 28 13:51:08 crc kubenswrapper[4857]: E1128 13:51:08.868508 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7c0f1a2ff3cfee597f67f3bc29d77fd31490547b96129404cba033cdd4a4dd1\": container with ID starting with e7c0f1a2ff3cfee597f67f3bc29d77fd31490547b96129404cba033cdd4a4dd1 not found: ID does not exist" containerID="e7c0f1a2ff3cfee597f67f3bc29d77fd31490547b96129404cba033cdd4a4dd1"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.868569 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7c0f1a2ff3cfee597f67f3bc29d77fd31490547b96129404cba033cdd4a4dd1"} err="failed to get container status \"e7c0f1a2ff3cfee597f67f3bc29d77fd31490547b96129404cba033cdd4a4dd1\": rpc error: code = NotFound desc = could not find container \"e7c0f1a2ff3cfee597f67f3bc29d77fd31490547b96129404cba033cdd4a4dd1\": container with ID starting with e7c0f1a2ff3cfee597f67f3bc29d77fd31490547b96129404cba033cdd4a4dd1 not found: ID does not exist"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.868602 4857 scope.go:117] "RemoveContainer" containerID="62c62ec6939c2ad08f2ff19215b2086a7083fd71f30e682e92a4da170794129a"
Nov 28 13:51:08 crc kubenswrapper[4857]: E1128 13:51:08.868954 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"62c62ec6939c2ad08f2ff19215b2086a7083fd71f30e682e92a4da170794129a\": container with ID starting with 62c62ec6939c2ad08f2ff19215b2086a7083fd71f30e682e92a4da170794129a not found: ID does not exist" containerID="62c62ec6939c2ad08f2ff19215b2086a7083fd71f30e682e92a4da170794129a"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.868983 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62c62ec6939c2ad08f2ff19215b2086a7083fd71f30e682e92a4da170794129a"} err="failed to get container status \"62c62ec6939c2ad08f2ff19215b2086a7083fd71f30e682e92a4da170794129a\": rpc error: code = NotFound desc = could not find container \"62c62ec6939c2ad08f2ff19215b2086a7083fd71f30e682e92a4da170794129a\": container with ID starting with 62c62ec6939c2ad08f2ff19215b2086a7083fd71f30e682e92a4da170794129a not found: ID does not exist"
Nov 28 13:51:10 crc kubenswrapper[4857]: I1128 13:51:10.321257 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0cd7df22-643a-4ea0-a182-9f40fca04305" path="/var/lib/kubelet/pods/0cd7df22-643a-4ea0-a182-9f40fca04305/volumes"
Nov 28 13:52:03 crc kubenswrapper[4857]: I1128 13:52:03.177458 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 13:52:03 crc kubenswrapper[4857]: I1128 13:52:03.177987 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.177983 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.178543 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 13:53:03 crc kubenswrapper[4857]: I1128 13:53:03.178241 4857 patch_prober.go:28] interesting pod/machine-config-daemon-jdgls container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 13:53:03 crc kubenswrapper[4857]: I1128 13:53:03.179042 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 13:53:03 crc kubenswrapper[4857]: I1128 13:53:03.179119 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jdgls"
Nov 28 13:53:03 crc kubenswrapper[4857]: I1128 13:53:03.180342 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7006ae030a773cc835d219f2a3366ff0efca75395f6a2dec2038ba3bfe337ea8"} pod="openshift-machine-config-operator/machine-config-daemon-jdgls" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 13:53:03 crc kubenswrapper[4857]: I1128 13:53:03.180681 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" podUID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerName="machine-config-daemon" containerID="cri-o://7006ae030a773cc835d219f2a3366ff0efca75395f6a2dec2038ba3bfe337ea8" gracePeriod=600
Nov 28 13:53:03 crc kubenswrapper[4857]: I1128 13:53:03.891152 4857 generic.go:334] "Generic (PLEG): container finished" podID="aba2e99a-c0de-4ae5-b347-de1565fd9d68" containerID="7006ae030a773cc835d219f2a3366ff0efca75395f6a2dec2038ba3bfe337ea8" exitCode=0
Nov 28 13:53:03 crc kubenswrapper[4857]: I1128 13:53:03.891197 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" event={"ID":"aba2e99a-c0de-4ae5-b347-de1565fd9d68","Type":"ContainerDied","Data":"7006ae030a773cc835d219f2a3366ff0efca75395f6a2dec2038ba3bfe337ea8"}
Nov 28 13:53:03 crc kubenswrapper[4857]: I1128 13:53:03.891488 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jdgls" event={"ID":"aba2e99a-c0de-4ae5-b347-de1565fd9d68","Type":"ContainerStarted","Data":"5cc215c0caed1f97a3ef9d1425f7846a8b932d709d10683f6db815e763b6db0a"}
Nov 28 13:53:03 crc kubenswrapper[4857]: I1128 13:53:03.891571 4857 scope.go:117] "RemoveContainer" containerID="aec981e3acc879a49c89954e63ed94f0f833f8b03ca2763d8a32d49a577ceda7"
Nov 28 13:53:42 crc kubenswrapper[4857]: I1128 13:53:42.714952 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-hc5mg"]
Nov 28 13:53:42 crc kubenswrapper[4857]: E1128 13:53:42.716474 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cd7df22-643a-4ea0-a182-9f40fca04305" containerName="gather"
Nov 28 13:53:42 crc kubenswrapper[4857]: I1128 13:53:42.716507 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cd7df22-643a-4ea0-a182-9f40fca04305" containerName="gather"
Nov 28 13:53:42 crc kubenswrapper[4857]: E1128 13:53:42.716541 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cd7df22-643a-4ea0-a182-9f40fca04305" containerName="copy"
Nov 28 13:53:42 crc kubenswrapper[4857]: I1128 13:53:42.716557 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cd7df22-643a-4ea0-a182-9f40fca04305" containerName="copy"
Nov 28 13:53:42 crc kubenswrapper[4857]: I1128 13:53:42.716974 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0cd7df22-643a-4ea0-a182-9f40fca04305" containerName="copy"
Nov 28 13:53:42 crc kubenswrapper[4857]: I1128 13:53:42.717006 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0cd7df22-643a-4ea0-a182-9f40fca04305" containerName="gather"
Nov 28 13:53:42 crc kubenswrapper[4857]: I1128 13:53:42.719670 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hc5mg"
Nov 28 13:53:42 crc kubenswrapper[4857]: I1128 13:53:42.726921 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hc5mg"]
Nov 28 13:53:42 crc kubenswrapper[4857]: I1128 13:53:42.855930 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7e9e5a4-4fa3-4237-a530-a400153b72c8-utilities\") pod \"redhat-operators-hc5mg\" (UID: \"a7e9e5a4-4fa3-4237-a530-a400153b72c8\") " pod="openshift-marketplace/redhat-operators-hc5mg"
Nov 28 13:53:42 crc kubenswrapper[4857]: I1128 13:53:42.856015 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqvqj\" (UniqueName: \"kubernetes.io/projected/a7e9e5a4-4fa3-4237-a530-a400153b72c8-kube-api-access-kqvqj\") pod \"redhat-operators-hc5mg\" (UID: \"a7e9e5a4-4fa3-4237-a530-a400153b72c8\") " pod="openshift-marketplace/redhat-operators-hc5mg"
Nov 28 13:53:42 crc kubenswrapper[4857]: I1128 13:53:42.856088 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7e9e5a4-4fa3-4237-a530-a400153b72c8-catalog-content\") pod \"redhat-operators-hc5mg\" (UID: \"a7e9e5a4-4fa3-4237-a530-a400153b72c8\") " pod="openshift-marketplace/redhat-operators-hc5mg"
Nov 28 13:53:42 crc kubenswrapper[4857]: I1128 13:53:42.957422 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqvqj\" (UniqueName: \"kubernetes.io/projected/a7e9e5a4-4fa3-4237-a530-a400153b72c8-kube-api-access-kqvqj\") pod \"redhat-operators-hc5mg\" (UID: \"a7e9e5a4-4fa3-4237-a530-a400153b72c8\") " pod="openshift-marketplace/redhat-operators-hc5mg"
Nov 28 13:53:42 crc kubenswrapper[4857]: I1128 13:53:42.957511 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7e9e5a4-4fa3-4237-a530-a400153b72c8-catalog-content\") pod \"redhat-operators-hc5mg\" (UID: \"a7e9e5a4-4fa3-4237-a530-a400153b72c8\") " pod="openshift-marketplace/redhat-operators-hc5mg"
Nov 28 13:53:42 crc kubenswrapper[4857]: I1128 13:53:42.957783 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7e9e5a4-4fa3-4237-a530-a400153b72c8-utilities\") pod \"redhat-operators-hc5mg\" (UID: \"a7e9e5a4-4fa3-4237-a530-a400153b72c8\") " pod="openshift-marketplace/redhat-operators-hc5mg"
Nov 28 13:53:42 crc kubenswrapper[4857]: I1128 13:53:42.958256 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7e9e5a4-4fa3-4237-a530-a400153b72c8-catalog-content\") pod \"redhat-operators-hc5mg\" (UID: \"a7e9e5a4-4fa3-4237-a530-a400153b72c8\") " pod="openshift-marketplace/redhat-operators-hc5mg"
Nov 28 13:53:42 crc kubenswrapper[4857]: I1128 13:53:42.958317 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7e9e5a4-4fa3-4237-a530-a400153b72c8-utilities\") pod \"redhat-operators-hc5mg\" (UID: \"a7e9e5a4-4fa3-4237-a530-a400153b72c8\") " pod="openshift-marketplace/redhat-operators-hc5mg"
Nov 28 13:53:42 crc kubenswrapper[4857]: I1128 13:53:42.977507 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqvqj\" (UniqueName: \"kubernetes.io/projected/a7e9e5a4-4fa3-4237-a530-a400153b72c8-kube-api-access-kqvqj\") pod \"redhat-operators-hc5mg\" (UID: \"a7e9e5a4-4fa3-4237-a530-a400153b72c8\") " pod="openshift-marketplace/redhat-operators-hc5mg"
Nov 28 13:53:43 crc kubenswrapper[4857]: I1128 13:53:43.048890 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hc5mg"
Nov 28 13:53:43 crc kubenswrapper[4857]: I1128 13:53:43.475614 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hc5mg"]
Nov 28 13:53:44 crc kubenswrapper[4857]: I1128 13:53:44.294507 4857 generic.go:334] "Generic (PLEG): container finished" podID="a7e9e5a4-4fa3-4237-a530-a400153b72c8" containerID="d7288925d5441e60e53b5ba66ec69408c57f81f0524d10c6c1ccf4a4c4c7ed92" exitCode=0
Nov 28 13:53:44 crc kubenswrapper[4857]: I1128 13:53:44.294613 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hc5mg" event={"ID":"a7e9e5a4-4fa3-4237-a530-a400153b72c8","Type":"ContainerDied","Data":"d7288925d5441e60e53b5ba66ec69408c57f81f0524d10c6c1ccf4a4c4c7ed92"}
Nov 28 13:53:44 crc kubenswrapper[4857]: I1128 13:53:44.295016 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hc5mg" event={"ID":"a7e9e5a4-4fa3-4237-a530-a400153b72c8","Type":"ContainerStarted","Data":"19438d989974ab51c01d48999f748187e9597e1d5d1e2c7b9caadc17e806a20f"}
Nov 28 13:53:44 crc kubenswrapper[4857]: I1128 13:53:44.297572 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 13:53:45 crc kubenswrapper[4857]: I1128 13:53:45.308815 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hc5mg" event={"ID":"a7e9e5a4-4fa3-4237-a530-a400153b72c8","Type":"ContainerStarted","Data":"b7576395029d62621935a92e5e06823cbecac4765885ab3714837b7bb484281a"}
Nov 28 13:53:46 crc kubenswrapper[4857]: I1128 13:53:46.318224 4857 generic.go:334] "Generic (PLEG): container finished" podID="a7e9e5a4-4fa3-4237-a530-a400153b72c8" containerID="b7576395029d62621935a92e5e06823cbecac4765885ab3714837b7bb484281a" exitCode=0
Nov 28 13:53:46 crc kubenswrapper[4857]: I1128 13:53:46.322570 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hc5mg" event={"ID":"a7e9e5a4-4fa3-4237-a530-a400153b72c8","Type":"ContainerDied","Data":"b7576395029d62621935a92e5e06823cbecac4765885ab3714837b7bb484281a"}
Nov 28 13:53:47 crc kubenswrapper[4857]: I1128 13:53:47.329198 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hc5mg" event={"ID":"a7e9e5a4-4fa3-4237-a530-a400153b72c8","Type":"ContainerStarted","Data":"c1725d1251bc0eb30719756f6740670f0cd71eb0dc7175c9c4981dd852ce1be7"}
Nov 28 13:53:47 crc kubenswrapper[4857]: I1128 13:53:47.346101 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-hc5mg" podStartSLOduration=2.889253152 podStartE2EDuration="5.346074866s" podCreationTimestamp="2025-11-28 13:53:42 +0000 UTC" firstStartedPulling="2025-11-28 13:53:44.297065082 +0000 UTC m=+2116.324440279" lastFinishedPulling="2025-11-28 13:53:46.753886826 +0000 UTC m=+2118.781261993" observedRunningTime="2025-11-28 13:53:47.344494701 +0000 UTC m=+2119.371869868" watchObservedRunningTime="2025-11-28 13:53:47.346074866 +0000 UTC m=+2119.373450033"